Message ID | 20230822085330.3978829-5-eperezma@redhat.com (mailing list archive)
---|---
State | New, archived
Series | Enable vdpa net migration with features depending on CVQ
On Tue, Aug 22, 2023 at 4:53 PM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> Doing it this way allows CVQ to be enabled before the dataplane vqs,
> restoring state such as MQ or MAC addresses properly in the case of a
> migration.
>
> The patch does it by defining a ->load NetClientInfo callback also for
> dataplane. Ideally, this should be done by an independent patch, but
> the function is already static so it would only add an empty
> vhost_vdpa_net_data_load stub.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>

Acked-by: Jason Wang <jasowang@redhat.com>

Thanks

> ---
> v3:
> * Fix subject typo
> * Expand patch message so it explains why
> ---
>  hw/virtio/vdpa-dev.c   |  3 +++
>  hw/virtio/vhost-vdpa.c |  3 ---
>  net/vhost-vdpa.c       | 57 +++++++++++++++++++++++++++++-------------
>  3 files changed, 42 insertions(+), 21 deletions(-)
>
> diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c
> index 363b625243..f22d5d5bc0 100644
> --- a/hw/virtio/vdpa-dev.c
> +++ b/hw/virtio/vdpa-dev.c
> @@ -255,6 +255,9 @@ static int vhost_vdpa_device_start(VirtIODevice *vdev, Error **errp)
>          error_setg_errno(errp, -ret, "Error starting vhost");
>          goto err_guest_notifiers;
>      }
> +    for (i = 0; i < s->dev.nvqs; ++i) {
> +        vhost_vdpa_set_vring_ready(&s->vdpa, i);
> +    }
>      s->started = true;
>
>      /*
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 0d9975b5b5..8ca2e3800c 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -1297,9 +1297,6 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>          if (unlikely(!ok)) {
>              return -1;
>          }
> -        for (int i = 0; i < dev->nvqs; ++i) {
> -            vhost_vdpa_set_vring_ready(v, dev->vq_index + i);
> -        }
>      } else {
>          vhost_vdpa_suspend(dev);
>          vhost_vdpa_svqs_stop(dev);
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 9251351b4b..3bf60f9431 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -371,6 +371,22 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
>      return 0;
>  }
>
> +static int vhost_vdpa_net_data_load(NetClientState *nc)
> +{
> +    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> +    struct vhost_vdpa *v = &s->vhost_vdpa;
> +    bool has_cvq = v->dev->vq_index_end % 2;
> +
> +    if (has_cvq) {
> +        return 0;
> +    }
> +
> +    for (int i = 0; i < v->dev->nvqs; ++i) {
> +        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
> +    }
> +    return 0;
> +}
> +
>  static void vhost_vdpa_net_client_stop(NetClientState *nc)
>  {
>      VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> @@ -393,6 +409,7 @@ static NetClientInfo net_vhost_vdpa_info = {
>          .size = sizeof(VhostVDPAState),
>          .receive = vhost_vdpa_receive,
>          .start = vhost_vdpa_net_data_start,
> +        .load = vhost_vdpa_net_data_load,
>          .stop = vhost_vdpa_net_client_stop,
>          .cleanup = vhost_vdpa_cleanup,
>          .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
> @@ -974,26 +991,30 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
>
>      assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
>
> -    if (!v->shadow_vqs_enabled) {
> -        return 0;
> -    }
> +    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
>
> -    n = VIRTIO_NET(v->dev->vdev);
> -    r = vhost_vdpa_net_load_mac(s, n);
> -    if (unlikely(r < 0)) {
> -        return r;
> -    }
> -    r = vhost_vdpa_net_load_mq(s, n);
> -    if (unlikely(r)) {
> -        return r;
> -    }
> -    r = vhost_vdpa_net_load_offloads(s, n);
> -    if (unlikely(r)) {
> -        return r;
> +    if (v->shadow_vqs_enabled) {
> +        n = VIRTIO_NET(v->dev->vdev);
> +        r = vhost_vdpa_net_load_mac(s, n);
> +        if (unlikely(r < 0)) {
> +            return r;
> +        }
> +        r = vhost_vdpa_net_load_mq(s, n);
> +        if (unlikely(r)) {
> +            return r;
> +        }
> +        r = vhost_vdpa_net_load_offloads(s, n);
> +        if (unlikely(r)) {
> +            return r;
> +        }
> +        r = vhost_vdpa_net_load_rx(s, n);
> +        if (unlikely(r)) {
> +            return r;
> +        }
>      }
> -    r = vhost_vdpa_net_load_rx(s, n);
> -    if (unlikely(r)) {
> -        return r;
> +
> +    for (int i = 0; i < v->dev->vq_index; ++i) {
> +        vhost_vdpa_set_vring_ready(v, i);
>      }
>
>      return 0;
> --
> 2.39.3
>
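For readers skimming the archive: the `vq_index_end % 2` test in the new `vhost_vdpa_net_data_load()` relies on the virtio-net queue layout, where a device with N data queue pairs exposes 2N dataplane virtqueues, plus one control virtqueue (CVQ) at the end when one is negotiated, for an odd total. The following standalone C sketch (illustrative only, not QEMU code; `device_has_cvq` is a made-up name) demonstrates the parity check:

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the parity test in vhost_vdpa_net_data_load(): a virtio-net
 * device with n data queue pairs has 2 * n dataplane vqs, and one extra
 * control vq at the end when CVQ is negotiated, so an odd vq_index_end
 * implies a CVQ is present. Illustrative only; not QEMU code. */
static bool device_has_cvq(unsigned vq_index_end)
{
    return vq_index_end % 2 != 0;
}

int main(void)
{
    printf("vq_index_end=4 -> has_cvq=%d\n", device_has_cvq(4)); /* 0 */
    printf("vq_index_end=5 -> has_cvq=%d\n", device_has_cvq(5)); /* 1 */
    return 0;
}
```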
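To make the ordering argument from the commit message concrete, here is a toy model (every name below is invented; nothing here is QEMU API) of why the CVQ must be enabled and the device state replayed before the dataplane vrings come up: a control command such as VIRTIO_NET_CTRL_MQ can only take effect once the control vring itself is ready.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the ordering established by this patch; none of these
 * names exist in QEMU. */

enum { TOY_MAX_VQS = 8 };

struct toy_vdpa_net {
    bool vring_enabled[TOY_MAX_VQS];
    unsigned cvq_index;        /* the control vq is the last one */
    unsigned curr_queue_pairs; /* state that must be restored via CVQ */
};

/* Stand-in for vhost_vdpa_set_vring_ready(). */
static void toy_set_vring_ready(struct toy_vdpa_net *dev, unsigned i)
{
    dev->vring_enabled[i] = true;
}

/* Stand-in for a VIRTIO_NET_CTRL_MQ command: it can only be processed
 * once the control vring itself has been enabled. */
static bool toy_cvq_set_mq(struct toy_vdpa_net *dev, unsigned pairs)
{
    if (!dev->vring_enabled[dev->cvq_index]) {
        return false; /* command lost: CVQ not ready yet */
    }
    dev->curr_queue_pairs = pairs;
    return true;
}

int main(void)
{
    struct toy_vdpa_net dev = { .cvq_index = 4 }; /* 2 data pairs + CVQ */

    /* Step 1: enable only the CVQ (vhost_vdpa_net_cvq_load in the patch). */
    toy_set_vring_ready(&dev, dev.cvq_index);

    /* Step 2: replay device state through the CVQ. */
    printf("MQ restored: %s\n", toy_cvq_set_mq(&dev, 2) ? "yes" : "no");

    /* Step 3: only now enable the dataplane vrings. */
    for (unsigned i = 0; i < dev.cvq_index; i++) {
        toy_set_vring_ready(&dev, i);
    }
    return 0;
}
```

Reversing steps 1 and 3 in main() would make toy_cvq_set_mq() fail, which mirrors the situation the patch avoids: dataplane queues coming up before the destination device has learned state such as the negotiated number of queue pairs or the MAC filter.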