diff mbox series

[v9,13/20] virtio-net: Return an error when vhost cannot enable RSS

Message ID 20240403-rss-v9-13-c6d87e69d38b@daynix.com (mailing list archive)
State New, archived
Headers show
Series virtio-net RSS/hash report fixes and improvements | expand

Commit Message

Akihiko Odaki April 3, 2024, 11:11 a.m. UTC
vhost requires eBPF for RSS. When eBPF is not available, virtio-net
implicitly disables RSS even if the user explicitly requests it. Return
an error instead of implicitly disabling RSS if RSS is requested but not
available.

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
---
 hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
 1 file changed, 48 insertions(+), 49 deletions(-)

Comments

Yuri Benditovich April 7, 2024, 9:46 p.m. UTC | #1
On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> implicitly disables RSS even if the user explicitly requests it. Return
> an error instead of implicitly disabling RSS if RSS is requested but not
> available.
>
> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> ---
>  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
>  1 file changed, 48 insertions(+), 49 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 61b49e335dea..3d53eba88cfc 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
>          return features;
>      }
>
> -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> -    }
>      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
>      vdev->backend_features = features;
>
> @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
>      return qatomic_read(&n->failover_primary_hidden);
>  }
>
> +static void virtio_net_device_unrealize(DeviceState *dev)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> +    VirtIONet *n = VIRTIO_NET(dev);
> +    int i, max_queue_pairs;
> +
> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> +        virtio_net_unload_ebpf(n);
> +    }
> +
> +    /* This will stop vhost backend if appropriate. */
> +    virtio_net_set_status(vdev, 0);
> +
> +    g_free(n->netclient_name);
> +    n->netclient_name = NULL;
> +    g_free(n->netclient_type);
> +    n->netclient_type = NULL;
> +
> +    g_free(n->mac_table.macs);
> +    g_free(n->vlans);
> +
> +    if (n->failover) {
> +        qobject_unref(n->primary_opts);
> +        device_listener_unregister(&n->primary_listener);
> +        migration_remove_notifier(&n->migration_state);
> +    } else {
> +        assert(n->primary_opts == NULL);
> +    }
> +
> +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> +    for (i = 0; i < max_queue_pairs; i++) {
> +        virtio_net_del_queue(n, i);
> +    }
> +    /* delete also control vq */
> +    virtio_del_queue(vdev, max_queue_pairs * 2);
> +    qemu_announce_timer_del(&n->announce_timer, false);
> +    g_free(n->vqs);
> +    qemu_del_nic(n->nic);
> +    virtio_net_rsc_cleanup(n);
> +    g_free(n->rss_data.indirections_table);
> +    net_rx_pkt_uninit(n->rx_pkt);
> +    virtio_cleanup(vdev);
> +}
> +
>  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>
>      net_rx_pkt_init(&n->rx_pkt);
>
> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> -        virtio_net_load_ebpf(n);
> -    }
> -}
> -
> -static void virtio_net_device_unrealize(DeviceState *dev)
> -{
> -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> -    VirtIONet *n = VIRTIO_NET(dev);
> -    int i, max_queue_pairs;
> -
> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> -        virtio_net_unload_ebpf(n);
> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&

I disagree with this change of qemu behavior.
From my point of view:
- this is not a major problem and it should not be a reason to stop VM execution
- it is enough to disable the RSS feature and continue working. Depending on
  other qemu parameters (number of queues, number of cpus) this might be just
  suboptimal. might be a minor problem and might be not a problem at all
- this change defines rss as _only_ feature whose absence breaks the VM start,
  _all_ other features are dropped silently and only rss is not. Why??
- the series has a title 'Fixes and improvements' . This is not a fix and not an
  improvement, this is significant behavioral change that should be discussed in
  light of future plans regarding rss
- I suggest to remove this change from the series, submit it separately
  and discuss from all the sides




> +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> +        virtio_net_device_unrealize(dev);
> +        error_setg(errp, "Can't load eBPF RSS for vhost");
>      }
> -
> -    /* This will stop vhost backend if appropriate. */
> -    virtio_net_set_status(vdev, 0);
> -
> -    g_free(n->netclient_name);
> -    n->netclient_name = NULL;
> -    g_free(n->netclient_type);
> -    n->netclient_type = NULL;
> -
> -    g_free(n->mac_table.macs);
> -    g_free(n->vlans);
> -
> -    if (n->failover) {
> -        qobject_unref(n->primary_opts);
> -        device_listener_unregister(&n->primary_listener);
> -        migration_remove_notifier(&n->migration_state);
> -    } else {
> -        assert(n->primary_opts == NULL);
> -    }
> -
> -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> -    for (i = 0; i < max_queue_pairs; i++) {
> -        virtio_net_del_queue(n, i);
> -    }
> -    /* delete also control vq */
> -    virtio_del_queue(vdev, max_queue_pairs * 2);
> -    qemu_announce_timer_del(&n->announce_timer, false);
> -    g_free(n->vqs);
> -    qemu_del_nic(n->nic);
> -    virtio_net_rsc_cleanup(n);
> -    g_free(n->rss_data.indirections_table);
> -    net_rx_pkt_uninit(n->rx_pkt);
> -    virtio_cleanup(vdev);
>  }
>
>  static void virtio_net_reset(VirtIODevice *vdev)
>
> --
> 2.44.0
>
Akihiko Odaki April 8, 2024, 1:29 a.m. UTC | #2
On 2024/04/08 6:46, Yuri Benditovich wrote:
> On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> vhost requires eBPF for RSS. When eBPF is not available, virtio-net
>> implicitly disables RSS even if the user explicitly requests it. Return
>> an error instead of implicitly disabling RSS if RSS is requested but not
>> available.
>>
>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>> ---
>>   hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
>>   1 file changed, 48 insertions(+), 49 deletions(-)
>>
>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>> index 61b49e335dea..3d53eba88cfc 100644
>> --- a/hw/net/virtio-net.c
>> +++ b/hw/net/virtio-net.c
>> @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
>>           return features;
>>       }
>>
>> -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
>> -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
>> -    }
>>       features = vhost_net_get_features(get_vhost_net(nc->peer), features);
>>       vdev->backend_features = features;
>>
>> @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
>>       return qatomic_read(&n->failover_primary_hidden);
>>   }
>>
>> +static void virtio_net_device_unrealize(DeviceState *dev)
>> +{
>> +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>> +    VirtIONet *n = VIRTIO_NET(dev);
>> +    int i, max_queue_pairs;
>> +
>> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
>> +        virtio_net_unload_ebpf(n);
>> +    }
>> +
>> +    /* This will stop vhost backend if appropriate. */
>> +    virtio_net_set_status(vdev, 0);
>> +
>> +    g_free(n->netclient_name);
>> +    n->netclient_name = NULL;
>> +    g_free(n->netclient_type);
>> +    n->netclient_type = NULL;
>> +
>> +    g_free(n->mac_table.macs);
>> +    g_free(n->vlans);
>> +
>> +    if (n->failover) {
>> +        qobject_unref(n->primary_opts);
>> +        device_listener_unregister(&n->primary_listener);
>> +        migration_remove_notifier(&n->migration_state);
>> +    } else {
>> +        assert(n->primary_opts == NULL);
>> +    }
>> +
>> +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
>> +    for (i = 0; i < max_queue_pairs; i++) {
>> +        virtio_net_del_queue(n, i);
>> +    }
>> +    /* delete also control vq */
>> +    virtio_del_queue(vdev, max_queue_pairs * 2);
>> +    qemu_announce_timer_del(&n->announce_timer, false);
>> +    g_free(n->vqs);
>> +    qemu_del_nic(n->nic);
>> +    virtio_net_rsc_cleanup(n);
>> +    g_free(n->rss_data.indirections_table);
>> +    net_rx_pkt_uninit(n->rx_pkt);
>> +    virtio_cleanup(vdev);
>> +}
>> +
>>   static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>>   {
>>       VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>> @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>>
>>       net_rx_pkt_init(&n->rx_pkt);
>>
>> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
>> -        virtio_net_load_ebpf(n);
>> -    }
>> -}
>> -
>> -static void virtio_net_device_unrealize(DeviceState *dev)
>> -{
>> -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>> -    VirtIONet *n = VIRTIO_NET(dev);
>> -    int i, max_queue_pairs;
>> -
>> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
>> -        virtio_net_unload_ebpf(n);
>> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> 
> I disagree with this change of qemu behavior.
>  From my point of view:
> - this is not a major problem and it should not be a reason to stop VM execution
> - it is enough to disable the RSS feature and continue working. Depending on
>    other qemu parameters (number of queues, number of cpus) this might be just
>    suboptimal. might be a minor problem and might be not a problem at all

The reasoning is that we shouldn't disable what the user explicitly 
requested. c.f., 
https://lore.kernel.org/all/20231102091717-mutt-send-email-mst@kernel.org/

> - this change defines rss as _only_ feature whose absence breaks the VM start,
>    _all_ other features are dropped silently and only rss is not. Why??

I'm following what QEMU does in the other places rather than what it 
does just in virtio-net. I have pointed out virtio-gpu raises errors in 
such a situation. c.f., 
https://lore.kernel.org/all/8880b6f9-f556-46f7-a191-eeec0fe208b0@daynix.com

> - the series has a title 'Fixes and improvements' . This is not a fix and not an
>    improvement, this is significant behavioral change that should be discussed in
>    light of future plans regarding rss
> - I suggest to remove this change from the series, submit it separately
>    and discuss from all the sides

We should have already discussed about these matters; I responded all 
past replies in the previous versions months ago and had no update after 
that. Let's focus on matters that were not previously pointed out.

Regards,
Akihiko Odaki
Yan Vugenfirer April 11, 2024, 11:28 a.m. UTC | #3
On Mon, Apr 8, 2024 at 4:31 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2024/04/08 6:46, Yuri Benditovich wrote:
> > On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> >> implicitly disables RSS even if the user explicitly requests it. Return
> >> an error instead of implicitly disabling RSS if RSS is requested but not
> >> available.
> >>
> >> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >> ---
> >>   hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
> >>   1 file changed, 48 insertions(+), 49 deletions(-)
> >>
> >> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> >> index 61b49e335dea..3d53eba88cfc 100644
> >> --- a/hw/net/virtio-net.c
> >> +++ b/hw/net/virtio-net.c
> >> @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> >>           return features;
> >>       }
> >>
> >> -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> >> -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> >> -    }
> >>       features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> >>       vdev->backend_features = features;
> >>
> >> @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
> >>       return qatomic_read(&n->failover_primary_hidden);
> >>   }
> >>
> >> +static void virtio_net_device_unrealize(DeviceState *dev)
> >> +{
> >> +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >> +    VirtIONet *n = VIRTIO_NET(dev);
> >> +    int i, max_queue_pairs;
> >> +
> >> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> >> +        virtio_net_unload_ebpf(n);
> >> +    }
> >> +
> >> +    /* This will stop vhost backend if appropriate. */
> >> +    virtio_net_set_status(vdev, 0);
> >> +
> >> +    g_free(n->netclient_name);
> >> +    n->netclient_name = NULL;
> >> +    g_free(n->netclient_type);
> >> +    n->netclient_type = NULL;
> >> +
> >> +    g_free(n->mac_table.macs);
> >> +    g_free(n->vlans);
> >> +
> >> +    if (n->failover) {
> >> +        qobject_unref(n->primary_opts);
> >> +        device_listener_unregister(&n->primary_listener);
> >> +        migration_remove_notifier(&n->migration_state);
> >> +    } else {
> >> +        assert(n->primary_opts == NULL);
> >> +    }
> >> +
> >> +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> >> +    for (i = 0; i < max_queue_pairs; i++) {
> >> +        virtio_net_del_queue(n, i);
> >> +    }
> >> +    /* delete also control vq */
> >> +    virtio_del_queue(vdev, max_queue_pairs * 2);
> >> +    qemu_announce_timer_del(&n->announce_timer, false);
> >> +    g_free(n->vqs);
> >> +    qemu_del_nic(n->nic);
> >> +    virtio_net_rsc_cleanup(n);
> >> +    g_free(n->rss_data.indirections_table);
> >> +    net_rx_pkt_uninit(n->rx_pkt);
> >> +    virtio_cleanup(vdev);
> >> +}
> >> +
> >>   static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >>   {
> >>       VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >> @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >>
> >>       net_rx_pkt_init(&n->rx_pkt);
> >>
> >> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> >> -        virtio_net_load_ebpf(n);
> >> -    }
> >> -}
> >> -
> >> -static void virtio_net_device_unrealize(DeviceState *dev)
> >> -{
> >> -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >> -    VirtIONet *n = VIRTIO_NET(dev);
> >> -    int i, max_queue_pairs;
> >> -
> >> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> >> -        virtio_net_unload_ebpf(n);
> >> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> >
> > I disagree with this change of qemu behavior.
> >  From my point of view:
> > - this is not a major problem and it should not be a reason to stop VM execution
> > - it is enough to disable the RSS feature and continue working. Depending on
> >    other qemu parameters (number of queues, number of cpus) this might be just
> >    suboptimal. might be a minor problem and might be not a problem at all
>

I think the basic example is when the kernel doesn't support ebpf
loading (either old kernel or kernel that, for some security reason,
disabled the capability). In this case, we will get a behavior change.

Best regards,
Yan.

>
> The reasoning is that we shouldn't disable what the user explicitly
> requested. c.f.,
> https://lore.kernel.org/all/20231102091717-mutt-send-email-mst@kernel.org/
>
> > - this change defines rss as _only_ feature whose absence breaks the VM start,
> >    _all_ other features are dropped silently and only rss is not. Why??
>
> I'm following what QEMU does in the other places rather than what it
> does just in virtio-net. I have pointed out virtio-gpu raises errors in
> such a situation. c.f.,
> https://lore.kernel.org/all/8880b6f9-f556-46f7-a191-eeec0fe208b0@daynix.com
>
> > - the series has a title 'Fixes and improvements' . This is not a fix and not an
> >    improvement, this is significant behavioral change that should be discussed in
> >    light of future plans regarding rss
> > - I suggest to remove this change from the series, submit it separately
> >    and discuss from all the sides
>
> We should have already discussed about these matters; I responded all
> past replies in the previous versions months ago and had no update after
> that. Let's focus on matters that were not previously pointed out.
>
> Regards,
> Akihiko Odaki
>
Yuri Benditovich April 15, 2024, 2:05 p.m. UTC | #4
On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> implicitly disables RSS even if the user explicitly requests it. Return
> an error instead of implicitly disabling RSS if RSS is requested but not
> available.
>
> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> ---
>  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
>  1 file changed, 48 insertions(+), 49 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 61b49e335dea..3d53eba88cfc 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
>          return features;
>      }
>
> -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> -    }
>      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
>      vdev->backend_features = features;
>
> @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
>      return qatomic_read(&n->failover_primary_hidden);
>  }
>
> +static void virtio_net_device_unrealize(DeviceState *dev)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> +    VirtIONet *n = VIRTIO_NET(dev);
> +    int i, max_queue_pairs;
> +
> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> +        virtio_net_unload_ebpf(n);
> +    }
> +
> +    /* This will stop vhost backend if appropriate. */
> +    virtio_net_set_status(vdev, 0);
> +
> +    g_free(n->netclient_name);
> +    n->netclient_name = NULL;
> +    g_free(n->netclient_type);
> +    n->netclient_type = NULL;
> +
> +    g_free(n->mac_table.macs);
> +    g_free(n->vlans);
> +
> +    if (n->failover) {
> +        qobject_unref(n->primary_opts);
> +        device_listener_unregister(&n->primary_listener);
> +        migration_remove_notifier(&n->migration_state);
> +    } else {
> +        assert(n->primary_opts == NULL);
> +    }
> +
> +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> +    for (i = 0; i < max_queue_pairs; i++) {
> +        virtio_net_del_queue(n, i);
> +    }
> +    /* delete also control vq */
> +    virtio_del_queue(vdev, max_queue_pairs * 2);
> +    qemu_announce_timer_del(&n->announce_timer, false);
> +    g_free(n->vqs);
> +    qemu_del_nic(n->nic);
> +    virtio_net_rsc_cleanup(n);
> +    g_free(n->rss_data.indirections_table);
> +    net_rx_pkt_uninit(n->rx_pkt);
> +    virtio_cleanup(vdev);
> +}
> +
>  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>
>      net_rx_pkt_init(&n->rx_pkt);
>
> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> -        virtio_net_load_ebpf(n);
> -    }
> -}
> -
> -static void virtio_net_device_unrealize(DeviceState *dev)
> -{
> -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> -    VirtIONet *n = VIRTIO_NET(dev);
> -    int i, max_queue_pairs;
> -
> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> -        virtio_net_unload_ebpf(n);
> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> +        virtio_net_device_unrealize(dev);
> +        error_setg(errp, "Can't load eBPF RSS for vhost");
>      }

As I already mentioned, I think this is an extremely bad idea to
fail to run qemu due to such a reason as the absence of one feature.
What I suggest is:
1. Redefine rss as tri-state (off|auto|on)
2. Fail to run only if rss is on and not available via ebpf
3. On auto - silently drop it
4. The same with 'hash' option - it is not compatible with vhost (at
least at the moment)
5. Reformat the patch as it is hard to review it due to replacing
entire procedures, i.e. one patch with replacing without changes,
another one - with real changes.
If this is hard to review only for me - please ignore that.

> -
> -    /* This will stop vhost backend if appropriate. */
> -    virtio_net_set_status(vdev, 0);
> -
> -    g_free(n->netclient_name);
> -    n->netclient_name = NULL;
> -    g_free(n->netclient_type);
> -    n->netclient_type = NULL;
> -
> -    g_free(n->mac_table.macs);
> -    g_free(n->vlans);
> -
> -    if (n->failover) {
> -        qobject_unref(n->primary_opts);
> -        device_listener_unregister(&n->primary_listener);
> -        migration_remove_notifier(&n->migration_state);
> -    } else {
> -        assert(n->primary_opts == NULL);
> -    }
> -
> -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> -    for (i = 0; i < max_queue_pairs; i++) {
> -        virtio_net_del_queue(n, i);
> -    }
> -    /* delete also control vq */
> -    virtio_del_queue(vdev, max_queue_pairs * 2);
> -    qemu_announce_timer_del(&n->announce_timer, false);
> -    g_free(n->vqs);
> -    qemu_del_nic(n->nic);
> -    virtio_net_rsc_cleanup(n);
> -    g_free(n->rss_data.indirections_table);
> -    net_rx_pkt_uninit(n->rx_pkt);
> -    virtio_cleanup(vdev);
>  }
>
>  static void virtio_net_reset(VirtIODevice *vdev)
>
> --
> 2.44.0
>
Jason Wang April 16, 2024, 4 a.m. UTC | #5
On Mon, Apr 15, 2024 at 10:05 PM Yuri Benditovich
<yuri.benditovich@daynix.com> wrote:
>
> On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >
> > vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> > implicitly disables RSS even if the user explicitly requests it. Return
> > an error instead of implicitly disabling RSS if RSS is requested but not
> > available.
> >
> > Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> > ---
> >  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
> >  1 file changed, 48 insertions(+), 49 deletions(-)
> >
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index 61b49e335dea..3d53eba88cfc 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> >          return features;
> >      }
> >
> > -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> > -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> > -    }
> >      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> >      vdev->backend_features = features;
> >
> > @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
> >      return qatomic_read(&n->failover_primary_hidden);
> >  }
> >
> > +static void virtio_net_device_unrealize(DeviceState *dev)
> > +{
> > +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > +    VirtIONet *n = VIRTIO_NET(dev);
> > +    int i, max_queue_pairs;
> > +
> > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > +        virtio_net_unload_ebpf(n);
> > +    }
> > +
> > +    /* This will stop vhost backend if appropriate. */
> > +    virtio_net_set_status(vdev, 0);
> > +
> > +    g_free(n->netclient_name);
> > +    n->netclient_name = NULL;
> > +    g_free(n->netclient_type);
> > +    n->netclient_type = NULL;
> > +
> > +    g_free(n->mac_table.macs);
> > +    g_free(n->vlans);
> > +
> > +    if (n->failover) {
> > +        qobject_unref(n->primary_opts);
> > +        device_listener_unregister(&n->primary_listener);
> > +        migration_remove_notifier(&n->migration_state);
> > +    } else {
> > +        assert(n->primary_opts == NULL);
> > +    }
> > +
> > +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > +    for (i = 0; i < max_queue_pairs; i++) {
> > +        virtio_net_del_queue(n, i);
> > +    }
> > +    /* delete also control vq */
> > +    virtio_del_queue(vdev, max_queue_pairs * 2);
> > +    qemu_announce_timer_del(&n->announce_timer, false);
> > +    g_free(n->vqs);
> > +    qemu_del_nic(n->nic);
> > +    virtio_net_rsc_cleanup(n);
> > +    g_free(n->rss_data.indirections_table);
> > +    net_rx_pkt_uninit(n->rx_pkt);
> > +    virtio_cleanup(vdev);
> > +}
> > +
> >  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >  {
> >      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >
> >      net_rx_pkt_init(&n->rx_pkt);
> >
> > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > -        virtio_net_load_ebpf(n);
> > -    }
> > -}
> > -
> > -static void virtio_net_device_unrealize(DeviceState *dev)
> > -{
> > -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > -    VirtIONet *n = VIRTIO_NET(dev);
> > -    int i, max_queue_pairs;
> > -
> > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > -        virtio_net_unload_ebpf(n);
> > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> > +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> > +        virtio_net_device_unrealize(dev);
> > +        error_setg(errp, "Can't load eBPF RSS for vhost");
> >      }
>
> As I already mentioned, I think this is an extremely bad idea to
> fail to run qemu due to such a reason as the absence of one feature.
> What I suggest is:
> 1. Redefine rss as tri-state (off|auto|on)
> 2. Fail to run only if rss is on and not available via ebpf
> 3. On auto - silently drop it

"Auto" might be problematic for migration compatibility, which is hard to
be used by management layers like libvirt. The reason is that there's
no way for libvirt to know if it is supported by device or not.

Thanks

> 4. The same with 'hash' option - it is not compatible with vhost (at
> least at the moment)
> 5. Reformat the patch as it is hard to review it due to replacing
> entire procedures, i.e. one patch with replacing without changes,
> another one - with real changes.
> If this is hard to review only for me - please ignore that.
>
> > -
> > -    /* This will stop vhost backend if appropriate. */
> > -    virtio_net_set_status(vdev, 0);
> > -
> > -    g_free(n->netclient_name);
> > -    n->netclient_name = NULL;
> > -    g_free(n->netclient_type);
> > -    n->netclient_type = NULL;
> > -
> > -    g_free(n->mac_table.macs);
> > -    g_free(n->vlans);
> > -
> > -    if (n->failover) {
> > -        qobject_unref(n->primary_opts);
> > -        device_listener_unregister(&n->primary_listener);
> > -        migration_remove_notifier(&n->migration_state);
> > -    } else {
> > -        assert(n->primary_opts == NULL);
> > -    }
> > -
> > -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > -    for (i = 0; i < max_queue_pairs; i++) {
> > -        virtio_net_del_queue(n, i);
> > -    }
> > -    /* delete also control vq */
> > -    virtio_del_queue(vdev, max_queue_pairs * 2);
> > -    qemu_announce_timer_del(&n->announce_timer, false);
> > -    g_free(n->vqs);
> > -    qemu_del_nic(n->nic);
> > -    virtio_net_rsc_cleanup(n);
> > -    g_free(n->rss_data.indirections_table);
> > -    net_rx_pkt_uninit(n->rx_pkt);
> > -    virtio_cleanup(vdev);
> >  }
> >
> >  static void virtio_net_reset(VirtIODevice *vdev)
> >
> > --
> > 2.44.0
> >
>
Yuri Benditovich April 16, 2024, 5:43 a.m. UTC | #6
On Tue, Apr 16, 2024 at 7:00 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Mon, Apr 15, 2024 at 10:05 PM Yuri Benditovich
> <yuri.benditovich@daynix.com> wrote:
> >
> > On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> > >
> > > vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> > > implicitly disables RSS even if the user explicitly requests it. Return
> > > an error instead of implicitly disabling RSS if RSS is requested but not
> > > available.
> > >
> > > Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> > > ---
> > >  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
> > >  1 file changed, 48 insertions(+), 49 deletions(-)
> > >
> > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > index 61b49e335dea..3d53eba88cfc 100644
> > > --- a/hw/net/virtio-net.c
> > > +++ b/hw/net/virtio-net.c
> > > @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> > >          return features;
> > >      }
> > >
> > > -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> > > -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> > > -    }
> > >      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> > >      vdev->backend_features = features;
> > >
> > > @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
> > >      return qatomic_read(&n->failover_primary_hidden);
> > >  }
> > >
> > > +static void virtio_net_device_unrealize(DeviceState *dev)
> > > +{
> > > +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > +    VirtIONet *n = VIRTIO_NET(dev);
> > > +    int i, max_queue_pairs;
> > > +
> > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > +        virtio_net_unload_ebpf(n);
> > > +    }
> > > +
> > > +    /* This will stop vhost backend if appropriate. */
> > > +    virtio_net_set_status(vdev, 0);
> > > +
> > > +    g_free(n->netclient_name);
> > > +    n->netclient_name = NULL;
> > > +    g_free(n->netclient_type);
> > > +    n->netclient_type = NULL;
> > > +
> > > +    g_free(n->mac_table.macs);
> > > +    g_free(n->vlans);
> > > +
> > > +    if (n->failover) {
> > > +        qobject_unref(n->primary_opts);
> > > +        device_listener_unregister(&n->primary_listener);
> > > +        migration_remove_notifier(&n->migration_state);
> > > +    } else {
> > > +        assert(n->primary_opts == NULL);
> > > +    }
> > > +
> > > +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > +    for (i = 0; i < max_queue_pairs; i++) {
> > > +        virtio_net_del_queue(n, i);
> > > +    }
> > > +    /* delete also control vq */
> > > +    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > +    qemu_announce_timer_del(&n->announce_timer, false);
> > > +    g_free(n->vqs);
> > > +    qemu_del_nic(n->nic);
> > > +    virtio_net_rsc_cleanup(n);
> > > +    g_free(n->rss_data.indirections_table);
> > > +    net_rx_pkt_uninit(n->rx_pkt);
> > > +    virtio_cleanup(vdev);
> > > +}
> > > +
> > >  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > >  {
> > >      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > >
> > >      net_rx_pkt_init(&n->rx_pkt);
> > >
> > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > -        virtio_net_load_ebpf(n);
> > > -    }
> > > -}
> > > -
> > > -static void virtio_net_device_unrealize(DeviceState *dev)
> > > -{
> > > -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > -    VirtIONet *n = VIRTIO_NET(dev);
> > > -    int i, max_queue_pairs;
> > > -
> > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > -        virtio_net_unload_ebpf(n);
> > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> > > +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> > > +        virtio_net_device_unrealize(dev);
> > > +        error_setg(errp, "Can't load eBPF RSS for vhost");
> > >      }
> >
> > As I already mentioned, I think this is an extremely bad idea to
> > fail to run qemu due to such a reason as .absence of one feature.
> > What I suggest is:
> > 1. Redefine rss as tri-state (off|auto|on)
> > 2. Fail to run only if rss is on and not available via ebpf
> > 3. On auto - silently drop it
>
> "Auto" might be problematic for migration compatibility which is hard to
> be used by management layers like libvirt. The reason is that there's
> no way for libvirt to know if it is supported by device or not.

In terms of migration every feature that somehow depends on the kernel
is problematic, not only RSS. Last time we added the USO feature - is
it different?
And in terms of migration "rss=on" is problematic the same way as "rss=auto".
Can you please show one scenario of migration where they will behave
differently? And in terms of regular experience there is a big advantage.


>
> Thanks
>
> > 4. The same with 'hash' option - it is not compatible with vhost (at
> > least at the moment)
> > 5. Reformat the patch as it is hard to review it due to replacing
> > entire procedures, i.e. one patch with replacing without changes,
> > another one - with real changes.
> > If this is hard to review only for me - please ignore that.
> >
> > > -
> > > -    /* This will stop vhost backend if appropriate. */
> > > -    virtio_net_set_status(vdev, 0);
> > > -
> > > -    g_free(n->netclient_name);
> > > -    n->netclient_name = NULL;
> > > -    g_free(n->netclient_type);
> > > -    n->netclient_type = NULL;
> > > -
> > > -    g_free(n->mac_table.macs);
> > > -    g_free(n->vlans);
> > > -
> > > -    if (n->failover) {
> > > -        qobject_unref(n->primary_opts);
> > > -        device_listener_unregister(&n->primary_listener);
> > > -        migration_remove_notifier(&n->migration_state);
> > > -    } else {
> > > -        assert(n->primary_opts == NULL);
> > > -    }
> > > -
> > > -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > -    for (i = 0; i < max_queue_pairs; i++) {
> > > -        virtio_net_del_queue(n, i);
> > > -    }
> > > -    /* delete also control vq */
> > > -    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > -    qemu_announce_timer_del(&n->announce_timer, false);
> > > -    g_free(n->vqs);
> > > -    qemu_del_nic(n->nic);
> > > -    virtio_net_rsc_cleanup(n);
> > > -    g_free(n->rss_data.indirections_table);
> > > -    net_rx_pkt_uninit(n->rx_pkt);
> > > -    virtio_cleanup(vdev);
> > >  }
> > >
> > >  static void virtio_net_reset(VirtIODevice *vdev)
> > >
> > > --
> > > 2.44.0
> > >
> >
>
Akihiko Odaki April 16, 2024, 6:54 a.m. UTC | #7
On 2024/04/16 13:00, Jason Wang wrote:
> On Mon, Apr 15, 2024 at 10:05 PM Yuri Benditovich
> <yuri.benditovich@daynix.com> wrote:
>>
>> On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>
>>> vhost requires eBPF for RSS. When eBPF is not available, virtio-net
>>> implicitly disables RSS even if the user explicitly requests it. Return
>>> an error instead of implicitly disabling RSS if RSS is requested but not
>>> available.
>>>
>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>> ---
>>>   hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
>>>   1 file changed, 48 insertions(+), 49 deletions(-)
>>>
>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>> index 61b49e335dea..3d53eba88cfc 100644
>>> --- a/hw/net/virtio-net.c
>>> +++ b/hw/net/virtio-net.c
>>> @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
>>>           return features;
>>>       }
>>>
>>> -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
>>> -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
>>> -    }
>>>       features = vhost_net_get_features(get_vhost_net(nc->peer), features);
>>>       vdev->backend_features = features;
>>>
>>> @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
>>>       return qatomic_read(&n->failover_primary_hidden);
>>>   }
>>>
>>> +static void virtio_net_device_unrealize(DeviceState *dev)
>>> +{
>>> +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>>> +    VirtIONet *n = VIRTIO_NET(dev);
>>> +    int i, max_queue_pairs;
>>> +
>>> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
>>> +        virtio_net_unload_ebpf(n);
>>> +    }
>>> +
>>> +    /* This will stop vhost backend if appropriate. */
>>> +    virtio_net_set_status(vdev, 0);
>>> +
>>> +    g_free(n->netclient_name);
>>> +    n->netclient_name = NULL;
>>> +    g_free(n->netclient_type);
>>> +    n->netclient_type = NULL;
>>> +
>>> +    g_free(n->mac_table.macs);
>>> +    g_free(n->vlans);
>>> +
>>> +    if (n->failover) {
>>> +        qobject_unref(n->primary_opts);
>>> +        device_listener_unregister(&n->primary_listener);
>>> +        migration_remove_notifier(&n->migration_state);
>>> +    } else {
>>> +        assert(n->primary_opts == NULL);
>>> +    }
>>> +
>>> +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
>>> +    for (i = 0; i < max_queue_pairs; i++) {
>>> +        virtio_net_del_queue(n, i);
>>> +    }
>>> +    /* delete also control vq */
>>> +    virtio_del_queue(vdev, max_queue_pairs * 2);
>>> +    qemu_announce_timer_del(&n->announce_timer, false);
>>> +    g_free(n->vqs);
>>> +    qemu_del_nic(n->nic);
>>> +    virtio_net_rsc_cleanup(n);
>>> +    g_free(n->rss_data.indirections_table);
>>> +    net_rx_pkt_uninit(n->rx_pkt);
>>> +    virtio_cleanup(vdev);
>>> +}
>>> +
>>>   static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>>>   {
>>>       VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>>> @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>>>
>>>       net_rx_pkt_init(&n->rx_pkt);
>>>
>>> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
>>> -        virtio_net_load_ebpf(n);
>>> -    }
>>> -}
>>> -
>>> -static void virtio_net_device_unrealize(DeviceState *dev)
>>> -{
>>> -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>>> -    VirtIONet *n = VIRTIO_NET(dev);
>>> -    int i, max_queue_pairs;
>>> -
>>> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
>>> -        virtio_net_unload_ebpf(n);
>>> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
>>> +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
>>> +        virtio_net_device_unrealize(dev);
>>> +        error_setg(errp, "Can't load eBPF RSS for vhost");
>>>       }
>>
>> As I already mentioned, I think this is an extremely bad idea to
>> fail to run qemu due to such a reason as .absence of one feature.
>> What I suggest is:
>> 1. Redefine rss as tri-state (off|auto|on)
>> 2. Fail to run only if rss is on and not available via ebpf
>> 3. On auto - silently drop it
> 
> "Auto" might be problematic for migration compatibility which is hard to
> be used by management layers like libvirt. The reason is that there's
> no way for libvirt to know if it is supported by device or not.

Certainly auto is not good for migration, but it is useful in the other 
situations. You can still set "on" or "off" if you care migration. I'll 
add "auto" support in the next version.

> 
> Thanks
> 
>> 4. The same with 'hash' option - it is not compatible with vhost (at
>> least at the moment)
>> 5. Reformat the patch as it is hard to review it due to replacing
>> entire procedures, i.e. one patch with replacing without changes,
>> another one - with real changes. >> If this is hard to review only for me - please ignore that.

I'll split this patch accordingly in the next version.

Regards,
Akihiko Odaki
Jason Wang April 16, 2024, 7:13 a.m. UTC | #8
On Tue, Apr 16, 2024 at 1:43 PM Yuri Benditovich
<yuri.benditovich@daynix.com> wrote:
>
> On Tue, Apr 16, 2024 at 7:00 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Mon, Apr 15, 2024 at 10:05 PM Yuri Benditovich
> > <yuri.benditovich@daynix.com> wrote:
> > >
> > > On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> > > >
> > > > vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> > > > implicitly disables RSS even if the user explicitly requests it. Return
> > > > an error instead of implicitly disabling RSS if RSS is requested but not
> > > > available.
> > > >
> > > > Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> > > > ---
> > > >  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
> > > >  1 file changed, 48 insertions(+), 49 deletions(-)
> > > >
> > > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > > index 61b49e335dea..3d53eba88cfc 100644
> > > > --- a/hw/net/virtio-net.c
> > > > +++ b/hw/net/virtio-net.c
> > > > @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> > > >          return features;
> > > >      }
> > > >
> > > > -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> > > > -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> > > > -    }
> > > >      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> > > >      vdev->backend_features = features;
> > > >
> > > > @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
> > > >      return qatomic_read(&n->failover_primary_hidden);
> > > >  }
> > > >
> > > > +static void virtio_net_device_unrealize(DeviceState *dev)
> > > > +{
> > > > +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > +    VirtIONet *n = VIRTIO_NET(dev);
> > > > +    int i, max_queue_pairs;
> > > > +
> > > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > +        virtio_net_unload_ebpf(n);
> > > > +    }
> > > > +
> > > > +    /* This will stop vhost backend if appropriate. */
> > > > +    virtio_net_set_status(vdev, 0);
> > > > +
> > > > +    g_free(n->netclient_name);
> > > > +    n->netclient_name = NULL;
> > > > +    g_free(n->netclient_type);
> > > > +    n->netclient_type = NULL;
> > > > +
> > > > +    g_free(n->mac_table.macs);
> > > > +    g_free(n->vlans);
> > > > +
> > > > +    if (n->failover) {
> > > > +        qobject_unref(n->primary_opts);
> > > > +        device_listener_unregister(&n->primary_listener);
> > > > +        migration_remove_notifier(&n->migration_state);
> > > > +    } else {
> > > > +        assert(n->primary_opts == NULL);
> > > > +    }
> > > > +
> > > > +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > > +    for (i = 0; i < max_queue_pairs; i++) {
> > > > +        virtio_net_del_queue(n, i);
> > > > +    }
> > > > +    /* delete also control vq */
> > > > +    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > > +    qemu_announce_timer_del(&n->announce_timer, false);
> > > > +    g_free(n->vqs);
> > > > +    qemu_del_nic(n->nic);
> > > > +    virtio_net_rsc_cleanup(n);
> > > > +    g_free(n->rss_data.indirections_table);
> > > > +    net_rx_pkt_uninit(n->rx_pkt);
> > > > +    virtio_cleanup(vdev);
> > > > +}
> > > > +
> > > >  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > > >  {
> > > >      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > > >
> > > >      net_rx_pkt_init(&n->rx_pkt);
> > > >
> > > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > -        virtio_net_load_ebpf(n);
> > > > -    }
> > > > -}
> > > > -
> > > > -static void virtio_net_device_unrealize(DeviceState *dev)
> > > > -{
> > > > -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > -    VirtIONet *n = VIRTIO_NET(dev);
> > > > -    int i, max_queue_pairs;
> > > > -
> > > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > -        virtio_net_unload_ebpf(n);
> > > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> > > > +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> > > > +        virtio_net_device_unrealize(dev);
> > > > +        error_setg(errp, "Can't load eBPF RSS for vhost");
> > > >      }
> > >
> > > As I already mentioned, I think this is an extremely bad idea to
> > > fail to run qemu due to such a reason as .absence of one feature.
> > > What I suggest is:
> > > 1. Redefine rss as tri-state (off|auto|on)
> > > 2. Fail to run only if rss is on and not available via ebpf
> > > 3. On auto - silently drop it
> >
> > "Auto" might be problematic for migration compatibility which is hard to
> > be used by management layers like libvirt. The reason is that there's
> > no way for libvirt to know if it is supported by device or not.
>
> In terms of migration every feature that somehow depends on the kernel
> is problematic, not only RSS.

True, but if we can avoid more, it would still be better.

> Last time we added the USO feature - is
> it different?

I may miss something but we never define tristate for USO?

    DEFINE_PROP_BIT64("guest_uso4", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_USO4, true),
    DEFINE_PROP_BIT64("guest_uso6", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_USO6, true),
    DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_USO, true),

?
> And in terms of migration "rss=on" is problematic the same way as "rss=auto".

Failing early when launching Qemu is better than failing silently as a
guest after a migration.

> Can you please show one scenario of migration where they will behave
> differently?

If you mean the problem of "auto", here's one:

Assuming auto is used in both src and dst. On source, rss is enabled
but not destination. RSS failed to work after migration.

> And in terms of regular experience there is a big advantage.

Similarly, silent clearing a feature is also not good:

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);

        virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
    }

The reason we never see complaints is probably because vhost/TAP are
the only backend that supports migration where vnet support there has
been more than a decade.

Thanks


>
>
> >
> > Thanks
> >
> > > 4. The same with 'hash' option - it is not compatible with vhost (at
> > > least at the moment)
> > > 5. Reformat the patch as it is hard to review it due to replacing
> > > entire procedures, i.e. one patch with replacing without changes,
> > > another one - with real changes.
> > > If this is hard to review only for me - please ignore that.
> > >
> > > > -
> > > > -    /* This will stop vhost backend if appropriate. */
> > > > -    virtio_net_set_status(vdev, 0);
> > > > -
> > > > -    g_free(n->netclient_name);
> > > > -    n->netclient_name = NULL;
> > > > -    g_free(n->netclient_type);
> > > > -    n->netclient_type = NULL;
> > > > -
> > > > -    g_free(n->mac_table.macs);
> > > > -    g_free(n->vlans);
> > > > -
> > > > -    if (n->failover) {
> > > > -        qobject_unref(n->primary_opts);
> > > > -        device_listener_unregister(&n->primary_listener);
> > > > -        migration_remove_notifier(&n->migration_state);
> > > > -    } else {
> > > > -        assert(n->primary_opts == NULL);
> > > > -    }
> > > > -
> > > > -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > > -    for (i = 0; i < max_queue_pairs; i++) {
> > > > -        virtio_net_del_queue(n, i);
> > > > -    }
> > > > -    /* delete also control vq */
> > > > -    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > > -    qemu_announce_timer_del(&n->announce_timer, false);
> > > > -    g_free(n->vqs);
> > > > -    qemu_del_nic(n->nic);
> > > > -    virtio_net_rsc_cleanup(n);
> > > > -    g_free(n->rss_data.indirections_table);
> > > > -    net_rx_pkt_uninit(n->rx_pkt);
> > > > -    virtio_cleanup(vdev);
> > > >  }
> > > >
> > > >  static void virtio_net_reset(VirtIODevice *vdev)
> > > >
> > > > --
> > > > 2.44.0
> > > >
> > >
> >
>
Yuri Benditovich April 16, 2024, 9:50 a.m. UTC | #9
On Tue, Apr 16, 2024 at 10:14 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Tue, Apr 16, 2024 at 1:43 PM Yuri Benditovich
> <yuri.benditovich@daynix.com> wrote:
> >
> > On Tue, Apr 16, 2024 at 7:00 AM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Mon, Apr 15, 2024 at 10:05 PM Yuri Benditovich
> > > <yuri.benditovich@daynix.com> wrote:
> > > >
> > > > On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> > > > >
> > > > > vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> > > > > implicitly disables RSS even if the user explicitly requests it. Return
> > > > > an error instead of implicitly disabling RSS if RSS is requested but not
> > > > > available.
> > > > >
> > > > > Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> > > > > ---
> > > > >  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
> > > > >  1 file changed, 48 insertions(+), 49 deletions(-)
> > > > >
> > > > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > > > index 61b49e335dea..3d53eba88cfc 100644
> > > > > --- a/hw/net/virtio-net.c
> > > > > +++ b/hw/net/virtio-net.c
> > > > > @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> > > > >          return features;
> > > > >      }
> > > > >
> > > > > -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> > > > > -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> > > > > -    }
> > > > >      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> > > > >      vdev->backend_features = features;
> > > > >
> > > > > @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
> > > > >      return qatomic_read(&n->failover_primary_hidden);
> > > > >  }
> > > > >
> > > > > +static void virtio_net_device_unrealize(DeviceState *dev)
> > > > > +{
> > > > > +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > > +    VirtIONet *n = VIRTIO_NET(dev);
> > > > > +    int i, max_queue_pairs;
> > > > > +
> > > > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > > +        virtio_net_unload_ebpf(n);
> > > > > +    }
> > > > > +
> > > > > +    /* This will stop vhost backend if appropriate. */
> > > > > +    virtio_net_set_status(vdev, 0);
> > > > > +
> > > > > +    g_free(n->netclient_name);
> > > > > +    n->netclient_name = NULL;
> > > > > +    g_free(n->netclient_type);
> > > > > +    n->netclient_type = NULL;
> > > > > +
> > > > > +    g_free(n->mac_table.macs);
> > > > > +    g_free(n->vlans);
> > > > > +
> > > > > +    if (n->failover) {
> > > > > +        qobject_unref(n->primary_opts);
> > > > > +        device_listener_unregister(&n->primary_listener);
> > > > > +        migration_remove_notifier(&n->migration_state);
> > > > > +    } else {
> > > > > +        assert(n->primary_opts == NULL);
> > > > > +    }
> > > > > +
> > > > > +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > > > +    for (i = 0; i < max_queue_pairs; i++) {
> > > > > +        virtio_net_del_queue(n, i);
> > > > > +    }
> > > > > +    /* delete also control vq */
> > > > > +    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > > > +    qemu_announce_timer_del(&n->announce_timer, false);
> > > > > +    g_free(n->vqs);
> > > > > +    qemu_del_nic(n->nic);
> > > > > +    virtio_net_rsc_cleanup(n);
> > > > > +    g_free(n->rss_data.indirections_table);
> > > > > +    net_rx_pkt_uninit(n->rx_pkt);
> > > > > +    virtio_cleanup(vdev);
> > > > > +}
> > > > > +
> > > > >  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > > > >  {
> > > > >      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > > @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > > > >
> > > > >      net_rx_pkt_init(&n->rx_pkt);
> > > > >
> > > > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > > -        virtio_net_load_ebpf(n);
> > > > > -    }
> > > > > -}
> > > > > -
> > > > > -static void virtio_net_device_unrealize(DeviceState *dev)
> > > > > -{
> > > > > -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > > -    VirtIONet *n = VIRTIO_NET(dev);
> > > > > -    int i, max_queue_pairs;
> > > > > -
> > > > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > > -        virtio_net_unload_ebpf(n);
> > > > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> > > > > +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> > > > > +        virtio_net_device_unrealize(dev);
> > > > > +        error_setg(errp, "Can't load eBPF RSS for vhost");
> > > > >      }
> > > >
> > > > As I already mentioned, I think this is an extremely bad idea to
> > > > fail to run qemu due to such a reason as .absence of one feature.
> > > > What I suggest is:
> > > > 1. Redefine rss as tri-state (off|auto|on)
> > > > 2. Fail to run only if rss is on and not available via ebpf
> > > > 3. On auto - silently drop it
> > >
> > > "Auto" might be problematic for migration compatibility which is hard to
> > > be used by management layers like libvirt. The reason is that there's
> > > no way for libvirt to know if it is supported by device or not.
> >
> > In terms of migration every feature that somehow depends on the kernel
> > is problematic, not only RSS.
>
> True, but if we can avoid more, it would still be better.
>
> > Last time we added the USO feature - is
> > it different?
>
> I may miss something but we never define tristate for USO?
>
>     DEFINE_PROP_BIT64("guest_uso4", VirtIONet, host_features,
>                       VIRTIO_NET_F_GUEST_USO4, true),
>     DEFINE_PROP_BIT64("guest_uso6", VirtIONet, host_features,
>                       VIRTIO_NET_F_GUEST_USO6, true),
>     DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features,
>                       VIRTIO_NET_F_HOST_USO, true),
>
When I added the USO feature, I followed the existing approach of virtio-net.
On get_features - check what was "requested" including those that were "on"
by default and drop those that aren't supported (vhost by itself also
can drop some of the features).

Still, if the source machine has a kernel that supports USO (visible
in the TAP flags) and the destination has an older kernel without
such support, the migration will probably fail.

The available solution today is to reduce machine generation in
libvirt profile (as an example),
aligning the generation over all the machines that are expected to
participate in migration.

IMO we should think on some _generic_ solution, for example feature
negotiation between
machines before the migration - if the driver receives notification
from the device it
can negotiate the change of hardware features to OS (at least for most of them).
Not trivial, but IMO better than just failing the execution.

> ?
> > And in terms of migration "rss=on" is problematic the same way as "rss=auto".
>
> Failing early when launching Qemu is better than failing silently as a
> guest after a migration.

Do I understand correctly - you mean fail qemu initialization on the
destination machine?

>
> > Can you please show one scenario of migration where they will behave
> > differently?
>
> If you mean the problem of "auto", here's one:
>
> Assuming auto is used in both src and dst. On source, rss is enabled
> but not destination. RSS failed to work after migration.

I think in this case the migration will fail when set_feature is
called on destination.
The same way as with "on". Am I mistaken?

>
> > And in terms of regular experience there is a big advantage.
>
> Similarly, silent clearing a feature is also not good:
>
>     if (!peer_has_vnet_hdr(n)) {
>         virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
>         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
>         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
>         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
>
>         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
>         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
>         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
>         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
>
>         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
>         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
>         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
>
>         virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
>     }
>
> The reason we never see complaints is probably because vhost/TAP are
> the only backend that supports migration where vnet support there has
> been more than a decade.

I think we never see complaints because we did not add new features
for a long time.

>
> Thanks
>
>
> >
> >
> > >
> > > Thanks
> > >
> > > > 4. The same with 'hash' option - it is not compatible with vhost (at
> > > > least at the moment)
> > > > 5. Reformat the patch as it is hard to review it due to replacing
> > > > entire procedures, i.e. one patch with replacing without changes,
> > > > another one - with real changes.
> > > > If this is hard to review only for me - please ignore that.
> > > >
> > > > > -
> > > > > -    /* This will stop vhost backend if appropriate. */
> > > > > -    virtio_net_set_status(vdev, 0);
> > > > > -
> > > > > -    g_free(n->netclient_name);
> > > > > -    n->netclient_name = NULL;
> > > > > -    g_free(n->netclient_type);
> > > > > -    n->netclient_type = NULL;
> > > > > -
> > > > > -    g_free(n->mac_table.macs);
> > > > > -    g_free(n->vlans);
> > > > > -
> > > > > -    if (n->failover) {
> > > > > -        qobject_unref(n->primary_opts);
> > > > > -        device_listener_unregister(&n->primary_listener);
> > > > > -        migration_remove_notifier(&n->migration_state);
> > > > > -    } else {
> > > > > -        assert(n->primary_opts == NULL);
> > > > > -    }
> > > > > -
> > > > > -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > > > -    for (i = 0; i < max_queue_pairs; i++) {
> > > > > -        virtio_net_del_queue(n, i);
> > > > > -    }
> > > > > -    /* delete also control vq */
> > > > > -    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > > > -    qemu_announce_timer_del(&n->announce_timer, false);
> > > > > -    g_free(n->vqs);
> > > > > -    qemu_del_nic(n->nic);
> > > > > -    virtio_net_rsc_cleanup(n);
> > > > > -    g_free(n->rss_data.indirections_table);
> > > > > -    net_rx_pkt_uninit(n->rx_pkt);
> > > > > -    virtio_cleanup(vdev);
> > > > >  }
> > > > >
> > > > >  static void virtio_net_reset(VirtIODevice *vdev)
> > > > >
> > > > > --
> > > > > 2.44.0
> > > > >
> > > >
> > >
> >
>
Yuri Benditovich April 16, 2024, 9:54 a.m. UTC | #10
On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> implicitly disables RSS even if the user explicitly requests it. Return
> an error instead of implicitly disabling RSS if RSS is requested but not
> available.
>
> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> ---
>  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
>  1 file changed, 48 insertions(+), 49 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 61b49e335dea..3d53eba88cfc 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
>          return features;
>      }
>
> -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> -    }
>      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
>      vdev->backend_features = features;
>
> @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
>      return qatomic_read(&n->failover_primary_hidden);
>  }
>
> +static void virtio_net_device_unrealize(DeviceState *dev)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> +    VirtIONet *n = VIRTIO_NET(dev);
> +    int i, max_queue_pairs;
> +
> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> +        virtio_net_unload_ebpf(n);
> +    }
> +
> +    /* This will stop vhost backend if appropriate. */
> +    virtio_net_set_status(vdev, 0);
> +
> +    g_free(n->netclient_name);
> +    n->netclient_name = NULL;
> +    g_free(n->netclient_type);
> +    n->netclient_type = NULL;
> +
> +    g_free(n->mac_table.macs);
> +    g_free(n->vlans);
> +
> +    if (n->failover) {
> +        qobject_unref(n->primary_opts);
> +        device_listener_unregister(&n->primary_listener);
> +        migration_remove_notifier(&n->migration_state);
> +    } else {
> +        assert(n->primary_opts == NULL);
> +    }
> +
> +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> +    for (i = 0; i < max_queue_pairs; i++) {
> +        virtio_net_del_queue(n, i);
> +    }
> +    /* delete also control vq */
> +    virtio_del_queue(vdev, max_queue_pairs * 2);
> +    qemu_announce_timer_del(&n->announce_timer, false);
> +    g_free(n->vqs);
> +    qemu_del_nic(n->nic);
> +    virtio_net_rsc_cleanup(n);
> +    g_free(n->rss_data.indirections_table);
> +    net_rx_pkt_uninit(n->rx_pkt);
> +    virtio_cleanup(vdev);
> +}
> +
>  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>
>      net_rx_pkt_init(&n->rx_pkt);
>
> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> -        virtio_net_load_ebpf(n);
> -    }
> -}
> -
> -static void virtio_net_device_unrealize(DeviceState *dev)
> -{
> -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> -    VirtIONet *n = VIRTIO_NET(dev);
> -    int i, max_queue_pairs;
> -
> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> -        virtio_net_unload_ebpf(n);
> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> +        virtio_net_device_unrealize(dev);
> +        error_setg(errp, "Can't load eBPF RSS for vhost");

One more thing:
In case of failure the message (it will be visible to the user, if I'm
not mistaken)
should be more clear, with a suggestion to disable the 'rss' feature.

>      }
> -
> -    /* This will stop vhost backend if appropriate. */
> -    virtio_net_set_status(vdev, 0);
> -
> -    g_free(n->netclient_name);
> -    n->netclient_name = NULL;
> -    g_free(n->netclient_type);
> -    n->netclient_type = NULL;
> -
> -    g_free(n->mac_table.macs);
> -    g_free(n->vlans);
> -
> -    if (n->failover) {
> -        qobject_unref(n->primary_opts);
> -        device_listener_unregister(&n->primary_listener);
> -        migration_remove_notifier(&n->migration_state);
> -    } else {
> -        assert(n->primary_opts == NULL);
> -    }
> -
> -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> -    for (i = 0; i < max_queue_pairs; i++) {
> -        virtio_net_del_queue(n, i);
> -    }
> -    /* delete also control vq */
> -    virtio_del_queue(vdev, max_queue_pairs * 2);
> -    qemu_announce_timer_del(&n->announce_timer, false);
> -    g_free(n->vqs);
> -    qemu_del_nic(n->nic);
> -    virtio_net_rsc_cleanup(n);
> -    g_free(n->rss_data.indirections_table);
> -    net_rx_pkt_uninit(n->rx_pkt);
> -    virtio_cleanup(vdev);
>  }
>
>  static void virtio_net_reset(VirtIODevice *vdev)
>
> --
> 2.44.0
>
Jason Wang April 17, 2024, 4:18 a.m. UTC | #11
On Tue, Apr 16, 2024 at 5:51 PM Yuri Benditovich
<yuri.benditovich@daynix.com> wrote:
>
> On Tue, Apr 16, 2024 at 10:14 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Tue, Apr 16, 2024 at 1:43 PM Yuri Benditovich
> > <yuri.benditovich@daynix.com> wrote:
> > >
> > > On Tue, Apr 16, 2024 at 7:00 AM Jason Wang <jasowang@redhat.com> wrote:
> > > >
> > > > On Mon, Apr 15, 2024 at 10:05 PM Yuri Benditovich
> > > > <yuri.benditovich@daynix.com> wrote:
> > > > >
> > > > > On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> > > > > >
> > > > > > vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> > > > > > implicitly disables RSS even if the user explicitly requests it. Return
> > > > > > an error instead of implicitly disabling RSS if RSS is requested but not
> > > > > > available.
> > > > > >
> > > > > > Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> > > > > > ---
> > > > > >  hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
> > > > > >  1 file changed, 48 insertions(+), 49 deletions(-)
> > > > > >
> > > > > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > > > > index 61b49e335dea..3d53eba88cfc 100644
> > > > > > --- a/hw/net/virtio-net.c
> > > > > > +++ b/hw/net/virtio-net.c
> > > > > > @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> > > > > >          return features;
> > > > > >      }
> > > > > >
> > > > > > -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> > > > > > -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> > > > > > -    }
> > > > > >      features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> > > > > >      vdev->backend_features = features;
> > > > > >
> > > > > > @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
> > > > > >      return qatomic_read(&n->failover_primary_hidden);
> > > > > >  }
> > > > > >
> > > > > > +static void virtio_net_device_unrealize(DeviceState *dev)
> > > > > > +{
> > > > > > +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > > > +    VirtIONet *n = VIRTIO_NET(dev);
> > > > > > +    int i, max_queue_pairs;
> > > > > > +
> > > > > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > > > +        virtio_net_unload_ebpf(n);
> > > > > > +    }
> > > > > > +
> > > > > > +    /* This will stop vhost backend if appropriate. */
> > > > > > +    virtio_net_set_status(vdev, 0);
> > > > > > +
> > > > > > +    g_free(n->netclient_name);
> > > > > > +    n->netclient_name = NULL;
> > > > > > +    g_free(n->netclient_type);
> > > > > > +    n->netclient_type = NULL;
> > > > > > +
> > > > > > +    g_free(n->mac_table.macs);
> > > > > > +    g_free(n->vlans);
> > > > > > +
> > > > > > +    if (n->failover) {
> > > > > > +        qobject_unref(n->primary_opts);
> > > > > > +        device_listener_unregister(&n->primary_listener);
> > > > > > +        migration_remove_notifier(&n->migration_state);
> > > > > > +    } else {
> > > > > > +        assert(n->primary_opts == NULL);
> > > > > > +    }
> > > > > > +
> > > > > > +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > > > > +    for (i = 0; i < max_queue_pairs; i++) {
> > > > > > +        virtio_net_del_queue(n, i);
> > > > > > +    }
> > > > > > +    /* delete also control vq */
> > > > > > +    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > > > > +    qemu_announce_timer_del(&n->announce_timer, false);
> > > > > > +    g_free(n->vqs);
> > > > > > +    qemu_del_nic(n->nic);
> > > > > > +    virtio_net_rsc_cleanup(n);
> > > > > > +    g_free(n->rss_data.indirections_table);
> > > > > > +    net_rx_pkt_uninit(n->rx_pkt);
> > > > > > +    virtio_cleanup(vdev);
> > > > > > +}
> > > > > > +
> > > > > >  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > > > > >  {
> > > > > >      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > > > @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> > > > > >
> > > > > >      net_rx_pkt_init(&n->rx_pkt);
> > > > > >
> > > > > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > > > -        virtio_net_load_ebpf(n);
> > > > > > -    }
> > > > > > -}
> > > > > > -
> > > > > > -static void virtio_net_device_unrealize(DeviceState *dev)
> > > > > > -{
> > > > > > -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > > > > > -    VirtIONet *n = VIRTIO_NET(dev);
> > > > > > -    int i, max_queue_pairs;
> > > > > > -
> > > > > > -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > > > > > -        virtio_net_unload_ebpf(n);
> > > > > > +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> > > > > > +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> > > > > > +        virtio_net_device_unrealize(dev);
> > > > > > +        error_setg(errp, "Can't load eBPF RSS for vhost");
> > > > > >      }
> > > > >
> > > > > As I already mentioned, I think this is an extremely bad idea to
> > > > > fail to run qemu due to such a reason as absence of one feature.
> > > > > What I suggest is:
> > > > > 1. Redefine rss as tri-state (off|auto|on)
> > > > > 2. Fail to run only if rss is on and not available via ebpf
> > > > > 3. On auto - silently drop it
> > > >
> > > > "Auto" might be problematic for migration compatibility which is hard to
> > > > be used by management layers like libvirt. The reason is that there's
> > > > no way for libvirt to know if it is supported by device or not.
> > >
> > > In terms of migration every feature that somehow depends on the kernel
> > > is problematic, not only RSS.
> >
> > True, but if we can avoid more, it would still be better.
> >
> > > Last time we added the USO feature - is
> > > it different?
> >
> > I may miss something but we never define tristate for USO?
> >
> >     DEFINE_PROP_BIT64("guest_uso4", VirtIONet, host_features,
> >                       VIRTIO_NET_F_GUEST_USO4, true),
> >     DEFINE_PROP_BIT64("guest_uso6", VirtIONet, host_features,
> >                       VIRTIO_NET_F_GUEST_USO6, true),
> >     DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features,
> >                       VIRTIO_NET_F_HOST_USO, true),
> >
> When I've added USO feature I followed the existing approach of virtio-net.
> On get_features - check what was "requested" including those that were "on"
> by default and drop those that aren't supported (vhost by itself also
> can drop some of features).
>
> Still if we have on source machine kernel that supports USO (visible
> on TAP flags)
> and on dest we have older kernel without such support, the migration
> will probably fail.

I may miss something, do we have something USO specific to migrate? If
not, the migration won't fail. And even if migration fails, it's still
not good.

Kernel intends to remove UFO support from 2016, but it breaks the
migration so there's no other choice by introducing UFO (via
emulation) back.

>
> The available solution today is to reduce machine generation in
> libvirt profile (as an example),
> aligning the generation over all the machines that are expected to
> participate in migration.
>
> IMO we should think on some _generic_ solution, for example feature
> negotiation between
> machines before the migration - if the driver receives notification
> from the device it
> can negotiate the change of hardware features to OS (at least for most of them).
> Not trivial, but IMO better than just failing the execution.

Adding Jonathon.

Yes, technically libvirt can detect the support for USO/RSS and
generate the correct qemu command line.

But what I want to say is, failing the launching is still better than
failing the workload running in the guest.

>
> > ?
> > > And in terms of migration "rss=on" is problematic the same way as "rss=auto".
> >
> > Failing early when launching Qemu is better than failing silently as a
> > guest after a migration.
>
> Do I understand correctly - you mean fail qemu initialization on the
> destination machine?

Yes, it's a hint for the management layer that the migration
compatibility check is wrong.

>
> >
> > > Can you please show one scenario of migration where they will behave
> > > differently?
> >
> > If you mean the problem of "auto", here's one:
> >
> > Assuming auto is used in both src and dst. On source, rss is enabled
> > but not destination. RSS failed to work after migration.
>
> I think in this case the migration will fail when set_feature is
> called on destination.
> The same way as with "on". Am I mistaken?

See above.

>
> >
> > > And in terms of regular experience there is a big advantage.
> >
> > Similarly, silent clearing a feature is also not good:
> >
> >     if (!peer_has_vnet_hdr(n)) {
> >         virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
> >         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
> >         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
> >         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
> >
> >         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
> >         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
> >         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
> >         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
> >
> >         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
> >         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
> >         virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
> >
> >         virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
> >     }
> >
> > The reason we never see complaints is probably because vhost/TAP are
> > the only backend that supports migration where vnet support there has
> > been more than a decade.
>
> I think we never see complaints because we did not add new features
> for a long time.

Probably but I basically meant peer_has_vnet_hdr() is always true for
the cases we support. So Qemu won't silently clear them even if they
were turned on explicitly by qemu command line.

Thanks

>
> >
> > Thanks
> >
> >
> > >
> > >
> > > >
> > > > Thanks
> > > >
> > > > > 4. The same with 'hash' option - it is not compatible with vhost (at
> > > > > least at the moment)
> > > > > 5. Reformat the patch as it is hard to review it due to replacing
> > > > > entire procedures, i.e. one patch with replacing without changes,
> > > > > another one - with real changes.
> > > > > If this is hard to review only for me - please ignore that.
> > > > >
> > > > > > -
> > > > > > -    /* This will stop vhost backend if appropriate. */
> > > > > > -    virtio_net_set_status(vdev, 0);
> > > > > > -
> > > > > > -    g_free(n->netclient_name);
> > > > > > -    n->netclient_name = NULL;
> > > > > > -    g_free(n->netclient_type);
> > > > > > -    n->netclient_type = NULL;
> > > > > > -
> > > > > > -    g_free(n->mac_table.macs);
> > > > > > -    g_free(n->vlans);
> > > > > > -
> > > > > > -    if (n->failover) {
> > > > > > -        qobject_unref(n->primary_opts);
> > > > > > -        device_listener_unregister(&n->primary_listener);
> > > > > > -        migration_remove_notifier(&n->migration_state);
> > > > > > -    } else {
> > > > > > -        assert(n->primary_opts == NULL);
> > > > > > -    }
> > > > > > -
> > > > > > -    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> > > > > > -    for (i = 0; i < max_queue_pairs; i++) {
> > > > > > -        virtio_net_del_queue(n, i);
> > > > > > -    }
> > > > > > -    /* delete also control vq */
> > > > > > -    virtio_del_queue(vdev, max_queue_pairs * 2);
> > > > > > -    qemu_announce_timer_del(&n->announce_timer, false);
> > > > > > -    g_free(n->vqs);
> > > > > > -    qemu_del_nic(n->nic);
> > > > > > -    virtio_net_rsc_cleanup(n);
> > > > > > -    g_free(n->rss_data.indirections_table);
> > > > > > -    net_rx_pkt_uninit(n->rx_pkt);
> > > > > > -    virtio_cleanup(vdev);
> > > > > >  }
> > > > > >
> > > > > >  static void virtio_net_reset(VirtIODevice *vdev)
> > > > > >
> > > > > > --
> > > > > > 2.44.0
> > > > > >
> > > > >
> > > >
> > >
> >
>
Yuri Benditovich April 20, 2024, 2:27 p.m. UTC | #12
On Tue, Apr 16, 2024 at 9:54 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2024/04/16 13:00, Jason Wang wrote:
> > On Mon, Apr 15, 2024 at 10:05 PM Yuri Benditovich
> > <yuri.benditovich@daynix.com> wrote:
> >>
> >> On Wed, Apr 3, 2024 at 2:11 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>
> >>> vhost requires eBPF for RSS. When eBPF is not available, virtio-net
> >>> implicitly disables RSS even if the user explicitly requests it. Return
> >>> an error instead of implicitly disabling RSS if RSS is requested but not
> >>> available.
> >>>
> >>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>> ---
> >>>   hw/net/virtio-net.c | 97 ++++++++++++++++++++++++++---------------------------
> >>>   1 file changed, 48 insertions(+), 49 deletions(-)
> >>>
> >>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> >>> index 61b49e335dea..3d53eba88cfc 100644
> >>> --- a/hw/net/virtio-net.c
> >>> +++ b/hw/net/virtio-net.c
> >>> @@ -793,9 +793,6 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> >>>           return features;
> >>>       }
> >>>
> >>> -    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> >>> -        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> >>> -    }
> >>>       features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> >>>       vdev->backend_features = features;
> >>>
> >>> @@ -3591,6 +3588,50 @@ static bool failover_hide_primary_device(DeviceListener *listener,
> >>>       return qatomic_read(&n->failover_primary_hidden);
> >>>   }
> >>>
> >>> +static void virtio_net_device_unrealize(DeviceState *dev)
> >>> +{
> >>> +    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >>> +    VirtIONet *n = VIRTIO_NET(dev);
> >>> +    int i, max_queue_pairs;
> >>> +
> >>> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> >>> +        virtio_net_unload_ebpf(n);
> >>> +    }
> >>> +
> >>> +    /* This will stop vhost backend if appropriate. */
> >>> +    virtio_net_set_status(vdev, 0);
> >>> +
> >>> +    g_free(n->netclient_name);
> >>> +    n->netclient_name = NULL;
> >>> +    g_free(n->netclient_type);
> >>> +    n->netclient_type = NULL;
> >>> +
> >>> +    g_free(n->mac_table.macs);
> >>> +    g_free(n->vlans);
> >>> +
> >>> +    if (n->failover) {
> >>> +        qobject_unref(n->primary_opts);
> >>> +        device_listener_unregister(&n->primary_listener);
> >>> +        migration_remove_notifier(&n->migration_state);
> >>> +    } else {
> >>> +        assert(n->primary_opts == NULL);
> >>> +    }
> >>> +
> >>> +    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
> >>> +    for (i = 0; i < max_queue_pairs; i++) {
> >>> +        virtio_net_del_queue(n, i);
> >>> +    }
> >>> +    /* delete also control vq */
> >>> +    virtio_del_queue(vdev, max_queue_pairs * 2);
> >>> +    qemu_announce_timer_del(&n->announce_timer, false);
> >>> +    g_free(n->vqs);
> >>> +    qemu_del_nic(n->nic);
> >>> +    virtio_net_rsc_cleanup(n);
> >>> +    g_free(n->rss_data.indirections_table);
> >>> +    net_rx_pkt_uninit(n->rx_pkt);
> >>> +    virtio_cleanup(vdev);
> >>> +}
> >>> +
> >>>   static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >>>   {
> >>>       VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >>> @@ -3760,53 +3801,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >>>
> >>>       net_rx_pkt_init(&n->rx_pkt);
> >>>
> >>> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> >>> -        virtio_net_load_ebpf(n);
> >>> -    }
> >>> -}
> >>> -
> >>> -static void virtio_net_device_unrealize(DeviceState *dev)
> >>> -{
> >>> -    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >>> -    VirtIONet *n = VIRTIO_NET(dev);
> >>> -    int i, max_queue_pairs;
> >>> -
> >>> -    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> >>> -        virtio_net_unload_ebpf(n);
> >>> +    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
> >>> +        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
> >>> +        virtio_net_device_unrealize(dev);
> >>> +        error_setg(errp, "Can't load eBPF RSS for vhost");
> >>>       }
> >>
> >> As I already mentioned, I think this is an extremely bad idea to
> >> fail to run qemu due to such a reason as absence of one feature.
> >> What I suggest is:
> >> 1. Redefine rss as tri-state (off|auto|on)
> >> 2. Fail to run only if rss is on and not available via ebpf
> >> 3. On auto - silently drop it
> >
> > "Auto" might be problematic for migration compatibility which is hard to
> > be used by management layers like libvirt. The reason is that there's
> > no way for libvirt to know if it is supported by device or not.
>
> Certainly auto is not good for migration, but it is useful in the other
> situations. You can still set "on" or "off" if you care migration. I'll
> add "auto" support in the next version.

It will be very nice if you take this patch to separate series, all
others will pass without questions, I think.

Thanks,
Yuri Benditovich

>
> >
> > Thanks
> >
> >> 4. The same with 'hash' option - it is not compatible with vhost (at
> >> least at the moment)
> >> 5. Reformat the patch as it is hard to review it due to replacing
> >> entire procedures, i.e. one patch with replacing without changes,
> >> another one - with real changes. >> If this is hard to review only for me - please ignore that.
>
> I'll split this patch accordingly in the next version.
>
> Regards,
> Akihiko Odaki
diff mbox series

Patch

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 61b49e335dea..3d53eba88cfc 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -793,9 +793,6 @@  static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
         return features;
     }
 
-    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
-        virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
-    }
     features = vhost_net_get_features(get_vhost_net(nc->peer), features);
     vdev->backend_features = features;
 
@@ -3591,6 +3588,50 @@  static bool failover_hide_primary_device(DeviceListener *listener,
     return qatomic_read(&n->failover_primary_hidden);
 }
 
+static void virtio_net_device_unrealize(DeviceState *dev)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+    VirtIONet *n = VIRTIO_NET(dev);
+    int i, max_queue_pairs;
+
+    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
+        virtio_net_unload_ebpf(n);
+    }
+
+    /* This will stop vhost backend if appropriate. */
+    virtio_net_set_status(vdev, 0);
+
+    g_free(n->netclient_name);
+    n->netclient_name = NULL;
+    g_free(n->netclient_type);
+    n->netclient_type = NULL;
+
+    g_free(n->mac_table.macs);
+    g_free(n->vlans);
+
+    if (n->failover) {
+        qobject_unref(n->primary_opts);
+        device_listener_unregister(&n->primary_listener);
+        migration_remove_notifier(&n->migration_state);
+    } else {
+        assert(n->primary_opts == NULL);
+    }
+
+    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
+    for (i = 0; i < max_queue_pairs; i++) {
+        virtio_net_del_queue(n, i);
+    }
+    /* delete also control vq */
+    virtio_del_queue(vdev, max_queue_pairs * 2);
+    qemu_announce_timer_del(&n->announce_timer, false);
+    g_free(n->vqs);
+    qemu_del_nic(n->nic);
+    virtio_net_rsc_cleanup(n);
+    g_free(n->rss_data.indirections_table);
+    net_rx_pkt_uninit(n->rx_pkt);
+    virtio_cleanup(vdev);
+}
+
 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -3760,53 +3801,11 @@  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
 
     net_rx_pkt_init(&n->rx_pkt);
 
-    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
-        virtio_net_load_ebpf(n);
-    }
-}
-
-static void virtio_net_device_unrealize(DeviceState *dev)
-{
-    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
-    VirtIONet *n = VIRTIO_NET(dev);
-    int i, max_queue_pairs;
-
-    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
-        virtio_net_unload_ebpf(n);
+    if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS) &&
+        !virtio_net_load_ebpf(n) && get_vhost_net(nc->peer)) {
+        virtio_net_device_unrealize(dev);
+        error_setg(errp, "Can't load eBPF RSS for vhost");
     }
-
-    /* This will stop vhost backend if appropriate. */
-    virtio_net_set_status(vdev, 0);
-
-    g_free(n->netclient_name);
-    n->netclient_name = NULL;
-    g_free(n->netclient_type);
-    n->netclient_type = NULL;
-
-    g_free(n->mac_table.macs);
-    g_free(n->vlans);
-
-    if (n->failover) {
-        qobject_unref(n->primary_opts);
-        device_listener_unregister(&n->primary_listener);
-        migration_remove_notifier(&n->migration_state);
-    } else {
-        assert(n->primary_opts == NULL);
-    }
-
-    max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
-    for (i = 0; i < max_queue_pairs; i++) {
-        virtio_net_del_queue(n, i);
-    }
-    /* delete also control vq */
-    virtio_del_queue(vdev, max_queue_pairs * 2);
-    qemu_announce_timer_del(&n->announce_timer, false);
-    g_free(n->vqs);
-    qemu_del_nic(n->nic);
-    virtio_net_rsc_cleanup(n);
-    g_free(n->rss_data.indirections_table);
-    net_rx_pkt_uninit(n->rx_pkt);
-    virtio_cleanup(vdev);
 }
 
 static void virtio_net_reset(VirtIODevice *vdev)