[v2,2/5] virtio-net: align ctrl_vq index for non-mq guest for vhost_vdpa

Message ID 1651048216-3365-3-git-send-email-si-wei.liu@oracle.com (mailing list archive)
State New, archived
Series vhost-vdpa multiqueue fixes

Commit Message

Si-Wei Liu April 27, 2022, 8:30 a.m. UTC
With a MQ-enabled vdpa device and a guest that does not support MQ,
e.g. booting vdpa with mq=on under OVMF (which drives a single vqp),
the assert failure below is seen:

../hw/virtio/vhost-vdpa.c:560: vhost_vdpa_get_vq_index: Assertion `idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs' failed.

0  0x00007f8ce3ff3387 in raise () at /lib64/libc.so.6
1  0x00007f8ce3ff4a78 in abort () at /lib64/libc.so.6
2  0x00007f8ce3fec1a6 in __assert_fail_base () at /lib64/libc.so.6
3  0x00007f8ce3fec252 in  () at /lib64/libc.so.6
4  0x0000558f52d79421 in vhost_vdpa_get_vq_index (dev=<optimized out>, idx=<optimized out>) at ../hw/virtio/vhost-vdpa.c:563
5  0x0000558f52d79421 in vhost_vdpa_get_vq_index (dev=<optimized out>, idx=<optimized out>) at ../hw/virtio/vhost-vdpa.c:558
6  0x0000558f52d7329a in vhost_virtqueue_mask (hdev=0x558f55c01800, vdev=0x558f568f91f0, n=2, mask=<optimized out>) at ../hw/virtio/vhost.c:1557
7  0x0000558f52c6b89a in virtio_pci_set_guest_notifier (d=d@entry=0x558f568f0f60, n=n@entry=2, assign=assign@entry=true, with_irqfd=with_irqfd@entry=false)
   at ../hw/virtio/virtio-pci.c:974
8  0x0000558f52c6c0d8 in virtio_pci_set_guest_notifiers (d=0x558f568f0f60, nvqs=3, assign=true) at ../hw/virtio/virtio-pci.c:1019
9  0x0000558f52bf091d in vhost_net_start (dev=dev@entry=0x558f568f91f0, ncs=0x558f56937cd0, data_queue_pairs=data_queue_pairs@entry=1, cvq=cvq@entry=1)
   at ../hw/net/vhost_net.c:361
10 0x0000558f52d4e5e7 in virtio_net_set_status (status=<optimized out>, n=0x558f568f91f0) at ../hw/net/virtio-net.c:289
11 0x0000558f52d4e5e7 in virtio_net_set_status (vdev=0x558f568f91f0, status=15 '\017') at ../hw/net/virtio-net.c:370
12 0x0000558f52d6c4b2 in virtio_set_status (vdev=vdev@entry=0x558f568f91f0, val=val@entry=15 '\017') at ../hw/virtio/virtio.c:1945
13 0x0000558f52c69eff in virtio_pci_common_write (opaque=0x558f568f0f60, addr=<optimized out>, val=<optimized out>, size=<optimized out>) at ../hw/virtio/virtio-pci.c:1292
14 0x0000558f52d15d6e in memory_region_write_accessor (mr=0x558f568f19d0, addr=20, value=<optimized out>, size=1, shift=<optimized out>, mask=<optimized out>, attrs=...)
   at ../softmmu/memory.c:492
15 0x0000558f52d127de in access_with_adjusted_size (addr=addr@entry=20, value=value@entry=0x7f8cdbffe748, size=size@entry=1, access_size_min=<optimized out>, access_size_max=<optimized out>, access_fn=0x558f52d15cf0 <memory_region_write_accessor>, mr=0x558f568f19d0, attrs=...) at ../softmmu/memory.c:554
16 0x0000558f52d157ef in memory_region_dispatch_write (mr=mr@entry=0x558f568f19d0, addr=20, data=<optimized out>, op=<optimized out>, attrs=attrs@entry=...)
   at ../softmmu/memory.c:1504
17 0x0000558f52d078e7 in flatview_write_continue (fv=fv@entry=0x7f8accbc3b90, addr=addr@entry=103079215124, attrs=..., ptr=ptr@entry=0x7f8ce6300028, len=len@entry=1, addr1=<optimized out>, l=<optimized out>, mr=0x558f568f19d0) at /home/opc/qemu-upstream/include/qemu/host-utils.h:165
18 0x0000558f52d07b06 in flatview_write (fv=0x7f8accbc3b90, addr=103079215124, attrs=..., buf=0x7f8ce6300028, len=1) at ../softmmu/physmem.c:2822
19 0x0000558f52d0b36b in address_space_write (as=<optimized out>, addr=<optimized out>, attrs=..., buf=buf@entry=0x7f8ce6300028, len=<optimized out>)
   at ../softmmu/physmem.c:2914
20 0x0000558f52d0b3da in address_space_rw (as=<optimized out>, addr=<optimized out>, attrs=...,
   attrs@entry=..., buf=buf@entry=0x7f8ce6300028, len=<optimized out>, is_write=<optimized out>) at ../softmmu/physmem.c:2924
21 0x0000558f52dced09 in kvm_cpu_exec (cpu=cpu@entry=0x558f55c2da60) at ../accel/kvm/kvm-all.c:2903
22 0x0000558f52dcfabd in kvm_vcpu_thread_fn (arg=arg@entry=0x558f55c2da60) at ../accel/kvm/kvm-accel-ops.c:49
23 0x0000558f52f9f04a in qemu_thread_start (args=<optimized out>) at ../util/qemu-thread-posix.c:556
24 0x00007f8ce4392ea5 in start_thread () at /lib64/libpthread.so.0
25 0x00007f8ce40bb9fd in clone () at /lib64/libc.so.6

The assert failure is caused by the vhost_dev index for the ctrl vq not
being aligned with the one actually in use by the guest. Upon
multiqueue feature negotiation in virtio_net_set_multiqueue(), if the
guest doesn't support multiqueue, the guest vq layout shrinks to a
single queue pair, consisting of 3 vqs in total (rx, tx and ctrl). This
results in the ctrl_vq taking a different vhost_dev group index than
the default. We can map the vq to the correct vhost_dev group by
checking whether MQ is supported by the guest and successfully
negotiated. Since the MQ feature is only present along with CTRL_VQ, we
make sure index 2 is only treated as the control vq when MQ is not
supported by the guest.
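
To illustrate (a sketch, not part of the patch itself): with a non-MQ
guest the device ends up with only vq 0 (rx0), vq 1 (tx0) and vq 2
(ctrl), while the default mapping in hw/net/virtio-net.c derives the
queue pair from the vq index by dividing it by two:

    /* existing helper in hw/net/virtio-net.c */
    static int vq2q(int queue_index)
    {
        return queue_index / 2;
    }

    /*
     * Non-MQ guest layout: vq 0 = rx0, vq 1 = tx0, vq 2 = ctrl.
     * vq2q(2) yields 1, i.e. data queue pair 1, which a single-vqp
     * guest never uses, instead of the ctrl vq's NetClientState at
     * n->max_queue_pairs; hence the mismatched vhost_dev lookup and
     * the assert above.
     */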

Fixes: 22288fe ("virtio-net: vhost control virtqueue support")
Suggested-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 hw/net/virtio-net.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

Comments

Jason Wang April 29, 2022, 2:23 a.m. UTC | #1
On 2022/4/27 16:30, Si-Wei Liu wrote:
> [...]
> @@ -3171,8 +3171,17 @@ static NetClientInfo net_virtio_info = {
>   static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
>   {
>       VirtIONet *n = VIRTIO_NET(vdev);
> -    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
> +    NetClientState *nc;
>       assert(n->vhost_started);
> +    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
> +        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
> +           error_report("virtio-net: bogus vq index ignored");


This seems trigger-able by guest.

Other looks good.

Thanks


> +           return false;
> +        }
> +        nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
> +    } else {
> +        nc = qemu_get_subqueue(n->nic, vq2q(idx));
> +    }
>       return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
>   }
>   
> [...]
Jason Wang April 29, 2022, 2:24 a.m. UTC | #2
On Fri, Apr 29, 2022 at 10:24 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2022/4/27 16:30, Si-Wei Liu wrote:
> > [...]
> > @@ -3171,8 +3171,17 @@ static NetClientInfo net_virtio_info = {
> >   static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
> >   {
> >       VirtIONet *n = VIRTIO_NET(vdev);
> > -    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
> > +    NetClientState *nc;
> >       assert(n->vhost_started);
> > +    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
> > +        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
> > +           error_report("virtio-net: bogus vq index ignored");
>
>
> This seems trigger-able by guest.
>
> Other looks good.

Btw, it would be better to add a comment to explain here.

Thanks
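
As an illustration of the kind of comment being asked for here, it
might read roughly like the sketch below (illustrative wording only;
the text that eventually lands is not part of this thread):

    /*
     * Without VIRTIO_NET_F_MQ the guest only drives a single queue
     * pair, so vq index 2 can only be the control vq (which in turn
     * requires VIRTIO_NET_F_CTRL_VQ); its NetClientState lives at
     * n->max_queue_pairs rather than at vq2q(idx).
     */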

>
> Thanks
>
>
> > [...]
Si-Wei Liu April 30, 2022, 1:12 a.m. UTC | #3
On 4/28/2022 7:23 PM, Jason Wang wrote:
>
> On 2022/4/27 16:30, Si-Wei Liu wrote:
>> [...]
>> +    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
>> +        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
>> +           error_report("virtio-net: bogus vq index ignored");
>
>
> This seems trigger-able by guest.
Yes, this is trigger-able by either a buggy guest or a buggy migration
flow (e.g. due to a buggy remote QEMU). I was not sure whether
LOG_GUEST_ERROR would be too strong here, and logging guest errors
doesn't seem to be the convention elsewhere in this file. What's your
preference here: switch to LOG_GUEST_ERROR, or simply drop the error
message?

Thanks,
-Siwei

>
> Other looks good.
>
> Thanks
>
>
>> [...]
Si-Wei Liu April 30, 2022, 1:13 a.m. UTC | #4
On 4/28/2022 7:24 PM, Jason Wang wrote:
> On Fri, Apr 29, 2022 at 10:24 AM Jason Wang <jasowang@redhat.com> wrote:
>>
>> On 2022/4/27 16:30, Si-Wei Liu wrote:
>>> [...]
>>> +    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
>>> +        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
>>> +           error_report("virtio-net: bogus vq index ignored");
>>
>> This seems trigger-able by guest.
>>
>> Other looks good.
> Btw, it would be better to add a comment to explain here.
Yep, will add.

-Siwei

>
> Thanks
>
>> Thanks
>>
>>
>>> [...]
Jason Wang May 5, 2022, 8:25 a.m. UTC | #5
On Sat, Apr 30, 2022 at 9:13 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
>
>
> On 4/28/2022 7:23 PM, Jason Wang wrote:
> >
> > On 2022/4/27 16:30, Si-Wei Liu wrote:
> >> [...]
> >> +    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
> >> +        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
> >> +           error_report("virtio-net: bogus vq index ignored");
> >
> >
> > This seems trigger-able by guest.
> Yes, this is trigger-able by either a buggy guest or a buggy migration
> flow (e.g. due to a buggy remote QEMU). I was not sure whether
> LOG_GUEST_ERROR would be too strong here, and logging guest errors
> doesn't seem to be the convention elsewhere in this file. What's your
> preference here: switch to LOG_GUEST_ERROR, or simply drop the error
> message?

I think it's better to use LOG_GUEST_ERROR here.

Thanks
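
For reference, a minimal sketch of that change, assuming the usual
qemu_log_mask()/LOG_GUEST_ERROR interface from "qemu/log.h" (the exact
message text below is only illustrative):

    /* assumes "qemu/log.h" is included near the top of virtio-net.c */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus vq index %d ignored\n", __func__, idx);
        return false;
    }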

>
> Thanks,
> -Siwei
>
> >
> > Other looks good.
> >
> > Thanks
> >
> >
> >> [...]

Patch

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index ffb3475..8ca0b80 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3171,8 +3171,17 @@  static NetClientInfo net_virtio_info = {
 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
-    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
+    NetClientState *nc;
     assert(n->vhost_started);
+    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
+        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
+           error_report("virtio-net: bogus vq index ignored");
+           return false;
+        }
+        nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
+    } else {
+        nc = qemu_get_subqueue(n->nic, vq2q(idx));
+    }
     return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
 }
 
@@ -3180,8 +3189,17 @@  static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                            bool mask)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
-    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
+    NetClientState *nc;
     assert(n->vhost_started);
+    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
+        if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
+           error_report("virtio-net: bogus vq index ignored");
+           return;
+        }
+        nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
+    } else {
+        nc = qemu_get_subqueue(n->nic, vq2q(idx));
+    }
     vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                              vdev, idx, mask);
 }