Message ID | 20221014132004.114602-3-lvivier@redhat.com (mailing list archive)
---|---
State | New, archived
Series | virtio-net: re-arm/re-schedule when tx_burst stops virtio_net_flush_tx()
On Fri, Oct 14, 2022 at 9:20 PM Laurent Vivier <lvivier@redhat.com> wrote:
>
> When virtio_net_flush_tx() reaches the tx_burst value, the whole
> queue is not flushed and nothing restarts the timer.
>
> Fix that by doing for the TX timer what we do for the bottom-half TX:
> rearm the timer if we find any packet to send during the
> virtio_net_flush_tx() call.
>
> Fixes: e3f30488e5f8 ("virtio-net: Limit number of packets sent per TX flush")
> Cc: alex.williamson@redhat.com
> Signed-off-by: Laurent Vivier <lvivier@redhat.com>
> ---
>  hw/net/virtio-net.c | 59 +++++++++++++++++++++++++++++++++++----------
>  1 file changed, 46 insertions(+), 13 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 1fbf2f3e19a7..b4964b821021 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -2536,14 +2536,19 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
>
>      virtio_queue_set_notification(q->tx_vq, 1);
>      ret = virtio_net_flush_tx(q);
> -    if (q->tx_bh && ret >= n->tx_burst) {
> +    if (ret >= n->tx_burst) {
>          /*
>           * the flush has been stopped by tx_burst
>           * we will not receive notification for the
>           * remaining part, so re-schedule
>           */
>          virtio_queue_set_notification(q->tx_vq, 0);
> -        qemu_bh_schedule(q->tx_bh);
> +        if (q->tx_bh) {
> +            qemu_bh_schedule(q->tx_bh);
> +        } else {
> +            timer_mod(q->tx_timer,
> +                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
> +        }
>          q->tx_waiting = 1;
>      }
>  }
> @@ -2644,6 +2649,8 @@ drop:
>      return num_packets;
>  }
>
> +static void virtio_net_tx_timer(void *opaque);
> +
>  static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
>  {
>      VirtIONet *n = VIRTIO_NET(vdev);
> @@ -2661,18 +2668,17 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
>      }
>
>      if (q->tx_waiting) {
> -        virtio_queue_set_notification(vq, 1);
> +        /* We already have queued packets, immediately flush */
>          timer_del(q->tx_timer);
> -        q->tx_waiting = 0;
> -        if (virtio_net_flush_tx(q) == -EINVAL) {
> -            return;
> -        }
> -    } else {
> -        timer_mod(q->tx_timer,
> -                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
> -        q->tx_waiting = 1;
> -        virtio_queue_set_notification(vq, 0);
> +        virtio_net_tx_timer(q);
> +        return;
>      }
> +
> +    /* re-arm timer to flush it (and more) on next tick */
> +    timer_mod(q->tx_timer,
> +              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
> +    q->tx_waiting = 1;
> +    virtio_queue_set_notification(vq, 0);
>  }

Nit: if we stick the above in the else, we can avoid a lot of changes.

Others look good.

Thanks

>
>  static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
> @@ -2702,6 +2708,8 @@ static void virtio_net_tx_timer(void *opaque)
>      VirtIONetQueue *q = opaque;
>      VirtIONet *n = q->n;
>      VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    int ret;
> +
>      /* This happens when device was stopped but BH wasn't. */
>      if (!vdev->vm_running) {
>          /* Make sure tx waiting is set, so we'll run when restarted. */
> @@ -2716,8 +2724,33 @@ static void virtio_net_tx_timer(void *opaque)
>          return;
>      }
>
> +    ret = virtio_net_flush_tx(q);
> +    if (ret == -EBUSY || ret == -EINVAL) {
> +        return;
> +    }
> +    /*
> +     * If we flush a full burst of packets, assume there are
> +     * more coming and immediately rearm
> +     */
> +    if (ret >= n->tx_burst) {
> +        q->tx_waiting = 1;
> +        timer_mod(q->tx_timer,
> +                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
> +        return;
> +    }
> +    /*
> +     * If less than a full burst, re-enable notification and flush
> +     * anything that may have come in while we weren't looking. If
> +     * we find something, assume the guest is still active and rearm
> +     */
>      virtio_queue_set_notification(q->tx_vq, 1);
> -    virtio_net_flush_tx(q);
> +    ret = virtio_net_flush_tx(q);
> +    if (ret > 0) {
> +        virtio_queue_set_notification(q->tx_vq, 0);
> +        q->tx_waiting = 1;
> +        timer_mod(q->tx_timer,
> +                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
> +    }
>  }
>
>  static void virtio_net_tx_bh(void *opaque)
> --
> 2.37.3
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 1fbf2f3e19a7..b4964b821021 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -2536,14 +2536,19 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
 
     virtio_queue_set_notification(q->tx_vq, 1);
     ret = virtio_net_flush_tx(q);
-    if (q->tx_bh && ret >= n->tx_burst) {
+    if (ret >= n->tx_burst) {
         /*
          * the flush has been stopped by tx_burst
          * we will not receive notification for the
          * remaining part, so re-schedule
          */
         virtio_queue_set_notification(q->tx_vq, 0);
-        qemu_bh_schedule(q->tx_bh);
+        if (q->tx_bh) {
+            qemu_bh_schedule(q->tx_bh);
+        } else {
+            timer_mod(q->tx_timer,
+                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+        }
         q->tx_waiting = 1;
     }
 }
@@ -2644,6 +2649,8 @@ drop:
     return num_packets;
 }
 
+static void virtio_net_tx_timer(void *opaque);
+
 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
@@ -2661,18 +2668,17 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
     }
 
     if (q->tx_waiting) {
-        virtio_queue_set_notification(vq, 1);
+        /* We already have queued packets, immediately flush */
         timer_del(q->tx_timer);
-        q->tx_waiting = 0;
-        if (virtio_net_flush_tx(q) == -EINVAL) {
-            return;
-        }
-    } else {
-        timer_mod(q->tx_timer,
-                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
-        q->tx_waiting = 1;
-        virtio_queue_set_notification(vq, 0);
+        virtio_net_tx_timer(q);
+        return;
     }
+
+    /* re-arm timer to flush it (and more) on next tick */
+    timer_mod(q->tx_timer,
+              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+    q->tx_waiting = 1;
+    virtio_queue_set_notification(vq, 0);
 }
 
 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
@@ -2702,6 +2708,8 @@ static void virtio_net_tx_timer(void *opaque)
     VirtIONetQueue *q = opaque;
     VirtIONet *n = q->n;
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int ret;
+
     /* This happens when device was stopped but BH wasn't. */
     if (!vdev->vm_running) {
         /* Make sure tx waiting is set, so we'll run when restarted. */
@@ -2716,8 +2724,33 @@ static void virtio_net_tx_timer(void *opaque)
         return;
     }
 
+    ret = virtio_net_flush_tx(q);
+    if (ret == -EBUSY || ret == -EINVAL) {
+        return;
+    }
+    /*
+     * If we flush a full burst of packets, assume there are
+     * more coming and immediately rearm
+     */
+    if (ret >= n->tx_burst) {
+        q->tx_waiting = 1;
+        timer_mod(q->tx_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+        return;
+    }
+    /*
+     * If less than a full burst, re-enable notification and flush
+     * anything that may have come in while we weren't looking. If
+     * we find something, assume the guest is still active and rearm
+     */
     virtio_queue_set_notification(q->tx_vq, 1);
-    virtio_net_flush_tx(q);
+    ret = virtio_net_flush_tx(q);
+    if (ret > 0) {
+        virtio_queue_set_notification(q->tx_vq, 0);
+        q->tx_waiting = 1;
+        timer_mod(q->tx_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+    }
 }
 
 static void virtio_net_tx_bh(void *opaque)
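For comparison, the "do for the TX timer what we do for the bottom-half TX" in the commit message refers to the pattern already used by virtio_net_tx_bh(). A condensed sketch of that handler, reconstructed from memory of hw/net/virtio-net.c (the vm_running and driver-ready checks at the top are elided; consult the tree for the exact code):

    static void virtio_net_tx_bh(void *opaque)
    {
        VirtIONetQueue *q = opaque;
        VirtIONet *n = q->n;
        int32_t ret;

        q->tx_waiting = 0;

        ret = virtio_net_flush_tx(q);
        if (ret == -EBUSY || ret == -EINVAL) {
            return; /* notification re-enabled elsewhere, or device broken */
        }

        /* A full burst was flushed: assume more is coming, reschedule. */
        if (ret >= n->tx_burst) {
            qemu_bh_schedule(q->tx_bh);
            q->tx_waiting = 1;
            return;
        }

        /* Less than a full burst: re-enable notification, then flush
         * anything that raced in; if we find something, the guest is
         * still busy, so mask notification and reschedule. */
        virtio_queue_set_notification(q->tx_vq, 1);
        ret = virtio_net_flush_tx(q);
        if (ret > 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            qemu_bh_schedule(q->tx_bh);
            q->tx_waiting = 1;
        }
    }

The patch above makes the timer path follow the same three-step logic, with timer_mod() standing in for qemu_bh_schedule().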
When virtio_net_flush_tx() reaches the tx_burst value, the whole
queue is not flushed and nothing restarts the timer.

Fix that by doing for the TX timer what we do for the bottom-half TX:
rearm the timer if we find any packet to send during the
virtio_net_flush_tx() call.

Fixes: e3f30488e5f8 ("virtio-net: Limit number of packets sent per TX flush")
Cc: alex.williamson@redhat.com
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 hw/net/virtio-net.c | 59 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 46 insertions(+), 13 deletions(-)
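For context, the pre-patch tail of virtio_net_tx_timer() (visible in the removed and context lines of the diff above) is where the stall comes from: the flush result is discarded, so when virtio_net_flush_tx() stops early at tx_burst, nothing re-arms the timer, q->tx_waiting stays 0, and the leftover packets sit in the queue until the guest kicks it again:

    /* pre-patch tail of virtio_net_tx_timer() */
    virtio_queue_set_notification(q->tx_vq, 1);  /* guest kicks back on */
    virtio_net_flush_tx(q);  /* return value ignored: a burst-limited
                                flush leaves packets queued with no
                                timer armed to pick them up */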