
[05/40] vdpa: populate desc_group from net_vhost_vdpa_init

Message ID 1701970793-6865-6-git-send-email-si-wei.liu@oracle.com (mailing list archive)
State New, archived
Series vdpa-net: improve migration downtime through descriptor ASID and persistent IOTLB

Commit Message

Si-Wei Liu Dec. 7, 2023, 5:39 p.m. UTC
Add the desc_group field to struct vhost_vdpa and populate it
when the corresponding vq is initialized in net_vhost_vdpa_init.
If the vq does not have the descriptor group capability, or does
not have a dedicated ASID group to host descriptors separately
from the data buffers, desc_group will be set to -1.

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 include/hw/virtio/vhost-vdpa.h |  1 +
 net/vhost-vdpa.c               | 15 +++++++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)

Comments

Eugenio Perez Martin Dec. 11, 2023, 10:46 a.m. UTC | #1
On Thu, Dec 7, 2023 at 7:50 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Add the desc_group field to struct vhost_vdpa and populate it
> when the corresponding vq is initialized in net_vhost_vdpa_init.
> If the vq does not have the descriptor group capability, or does
> not have a dedicated ASID group to host descriptors separately
> from the data buffers, desc_group will be set to -1.
>

We should use a defined constant. As always, I don't have a good name
though :). DESC_GROUP_SAME_AS_BUFFERS_GROUP?
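
For illustration, such a constant could look like the following (the
name and placement are hypothetical; nothing in the series defines it
yet):

    /* Descriptors share the ASID group of the data buffers; no
     * dedicated descriptor group is available. */
    #define VHOST_VDPA_DESC_GROUP_SAME_AS_BUFFERS  (-1)

    /* Then, in net_vhost_vdpa_init(): */
    s->vhost_vdpa.desc_group = VHOST_VDPA_DESC_GROUP_SAME_AS_BUFFERS;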

> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  include/hw/virtio/vhost-vdpa.h |  1 +
>  net/vhost-vdpa.c               | 15 +++++++++++++--
>  2 files changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index 6533ad2..63493ff 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -87,6 +87,7 @@ typedef struct vhost_vdpa {
>      Error *migration_blocker;
>      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
>      IOMMUNotifier n;
> +    int64_t desc_group;
>  } VhostVDPA;
>
>  int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index cb5705d..1a738b2 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -1855,11 +1855,22 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>
>      ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
>      if (ret) {
> -        qemu_del_net_client(nc);
> -        return NULL;
> +        goto err;
>      }
>
> +    if (is_datapath) {
> +        ret = vhost_vdpa_probe_desc_group(vdpa_device_fd, features,
> +                                          0, &desc_group, errp);
> +        if (unlikely(ret < 0)) {
> +            goto err;
> +        }
> +    }
> +    s->vhost_vdpa.desc_group = desc_group;

Why not do the probe at the same time as the CVQ isolation probe? It
would save all the effort of restoring the previous device status, not
to mention avoid initializing and resetting the device so many times
for the probing. The error unwinding would not be needed here that way.

I think the most controversial part is how to know the right vring
group. When I sent the CVQ probe, I delegated that to the device
startup, and we decided it would be weird to have CVQ isolated only in
the MQ case but not in the SQ case. I think we could do the same here
for the sake of making the series simpler: just check the actual
isolation of the vring descriptor group, and then move to saving the
actual vring group at initialization if it saves significant time.

Does it make sense to you?

Thanks!

>      return nc;
> +
> +err:
> +    qemu_del_net_client(nc);
> +    return NULL;
>  }
>
>  static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
> --
> 1.8.3.1
>
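
For reference, the consolidation suggested above could piggyback on
the init/reset cycle that the CVQ isolation probe already performs, so
the group queries add no extra device round trips. A rough sketch,
which assumes the VHOST_VDPA_GET_VRING_GROUP and
VHOST_VDPA_GET_VRING_DESC_GROUP ioctls and invents the helper name
(this is not the series' actual code):

    /* Query both groups of one vring while the device is already in a
     * probeable state (features acknowledged); returns the dedicated
     * descriptor group ID, or -1 if descriptors share the buffers'
     * group. */
    static int64_t vhost_vdpa_probe_desc_isolation(int device_fd,
                                                   unsigned vq_index,
                                                   Error **errp)
    {
        struct vhost_vring_state buf = { .index = vq_index };
        struct vhost_vring_state desc = { .index = vq_index };

        if (ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &buf) < 0) {
            error_setg_errno(errp, errno, "Cannot get vring group");
            return -errno;
        }
        if (ioctl(device_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &desc) < 0) {
            error_setg_errno(errp, errno, "Cannot get descriptor group");
            return -errno;
        }

        /* A dedicated descriptor ASID group exists iff the IDs differ. */
        return desc.num != buf.num ? (int64_t)desc.num : -1;
    }
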
Eugenio Perez Martin Dec. 11, 2023, 11:01 a.m. UTC | #2
On Mon, Dec 11, 2023 at 11:46 AM Eugenio Perez Martin
<eperezma@redhat.com> wrote:
>
> On Thu, Dec 7, 2023 at 7:50 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
> >
> > Add the desc_group field to struct vhost_vdpa and populate it
> > when the corresponding vq is initialized in net_vhost_vdpa_init.
> > If the vq does not have the descriptor group capability, or does
> > not have a dedicated ASID group to host descriptors separately
> > from the data buffers, desc_group will be set to -1.
> >
>
> We should use a defined constant. As always, I don't have a good name
> though :). DESC_GROUP_SAME_AS_BUFFERS_GROUP?
>
> > Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> > ---
> >  include/hw/virtio/vhost-vdpa.h |  1 +
> >  net/vhost-vdpa.c               | 15 +++++++++++++--
> >  2 files changed, 14 insertions(+), 2 deletions(-)
> >
> > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > index 6533ad2..63493ff 100644
> > --- a/include/hw/virtio/vhost-vdpa.h
> > +++ b/include/hw/virtio/vhost-vdpa.h
> > @@ -87,6 +87,7 @@ typedef struct vhost_vdpa {
> >      Error *migration_blocker;
> >      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> >      IOMMUNotifier n;
> > +    int64_t desc_group;
> >  } VhostVDPA;
> >
> >  int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index cb5705d..1a738b2 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -1855,11 +1855,22 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >
> >      ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
> >      if (ret) {
> > -        qemu_del_net_client(nc);
> > -        return NULL;
> > +        goto err;
> >      }
> >
> > +    if (is_datapath) {
> > +        ret = vhost_vdpa_probe_desc_group(vdpa_device_fd, features,
> > +                                          0, &desc_group, errp);

Also, it is always checking the vring group of the first virtqueue
of the vdpa parent device, isn't it? The 3rd parameter should be
queue_pair_index*2.

Even with queue_pair_index*2, we're also assuming the tx queue will
have the same vring group as the rx queue. While I think this is a
valid assumption, maybe it is better to probe it at initialization
and act as if the device does not have VHOST_BACKEND_F_DESC_ASID if
we find otherwise?

Thanks!


> > +        if (unlikely(ret < 0)) {
> > +            goto err;
> > +        }
> > +    }
> > +    s->vhost_vdpa.desc_group = desc_group;
>
> Why not do the probe at the same time as the CVQ isolation probe? It
> would save all the effort of restoring the previous device status, not
> to mention avoid initializing and resetting the device so many times
> for the probing. The error unwinding would not be needed here that way.
>
> I think the most controversial part is how to know the right vring
> group. When I sent the CVQ probe, I delegated that to the device
> startup, and we decided it would be weird to have CVQ isolated only in
> the MQ case but not in the SQ case. I think we could do the same here
> for the sake of making the series simpler: just check the actual
> isolation of the vring descriptor group, and then move to saving the
> actual vring group at initialization if it saves significant time.
>
> Does it make sense to you?
>
> Thanks!
>
> >      return nc;
> > +
> > +err:
> > +    qemu_del_net_client(nc);
> > +    return NULL;
> >  }
> >
> >  static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
> > --
> > 1.8.3.1
> >
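
To make the indexing concern above concrete: the rx and tx vrings of
queue pair i sit at consecutive vring indexes, so a per-pair probe
would target i*2 and i*2+1. A sketch of the mapping and of the
proposed fallback (desc_group_of() is a hypothetical accessor, not an
existing function):

    /* vring indexes of a net queue pair; rx first, then tx. */
    static inline unsigned rx_vring_index(unsigned queue_pair_index)
    {
        return queue_pair_index * 2;
    }

    static inline unsigned tx_vring_index(unsigned queue_pair_index)
    {
        return queue_pair_index * 2 + 1;
    }

    /* If rx and tx of a pair ever land in different descriptor
     * groups, act as if VHOST_BACKEND_F_DESC_ASID were absent. */
    if (desc_group_of(rx_vring_index(i)) !=
        desc_group_of(tx_vring_index(i))) {
        backend_features &= ~BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
    }
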
Jason Wang Jan. 11, 2024, 7:09 a.m. UTC | #3
On Fri, Dec 8, 2023 at 2:50 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Add the desc_group field to struct vhost_vdpa and populate it
> when the corresponding vq is initialized in net_vhost_vdpa_init.
> If the vq does not have the descriptor group capability, or does
> not have a dedicated ASID group to host descriptors separately
> from the data buffers, desc_group will be set to -1.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  include/hw/virtio/vhost-vdpa.h |  1 +
>  net/vhost-vdpa.c               | 15 +++++++++++++--
>  2 files changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index 6533ad2..63493ff 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -87,6 +87,7 @@ typedef struct vhost_vdpa {
>      Error *migration_blocker;
>      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
>      IOMMUNotifier n;
> +    int64_t desc_group;
>  } VhostVDPA;
>
>  int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index cb5705d..1a738b2 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -1855,11 +1855,22 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>
>      ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
>      if (ret) {
> -        qemu_del_net_client(nc);
> -        return NULL;
> +        goto err;

This part, introducing the "err" label, looks more like a cleanup.

Others look good.

Thanks

>      }
>
> +    if (is_datapath) {
> +        ret = vhost_vdpa_probe_desc_group(vdpa_device_fd, features,
> +                                          0, &desc_group, errp);
> +        if (unlikely(ret < 0)) {
> +            goto err;
> +        }
> +    }
> +    s->vhost_vdpa.desc_group = desc_group;
>      return nc;
> +
> +err:
> +    qemu_del_net_client(nc);
> +    return NULL;
>  }
>
>  static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
> --
> 1.8.3.1
>

Patch

diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 6533ad2..63493ff 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -87,6 +87,7 @@ typedef struct vhost_vdpa {
     Error *migration_blocker;
     VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
     IOMMUNotifier n;
+    int64_t desc_group;
 } VhostVDPA;
 
 int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index cb5705d..1a738b2 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -1855,11 +1855,22 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
 
     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
     if (ret) {
-        qemu_del_net_client(nc);
-        return NULL;
+        goto err;
     }
 
+    if (is_datapath) {
+        ret = vhost_vdpa_probe_desc_group(vdpa_device_fd, features,
+                                          0, &desc_group, errp);
+        if (unlikely(ret < 0)) {
+            goto err;
+        }
+    }
+    s->vhost_vdpa.desc_group = desc_group;
     return nc;
+
+err:
+    qemu_del_net_client(nc);
+    return NULL;
 }
 
 static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
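
The vhost_vdpa_probe_desc_group() helper called above is introduced
earlier in the series and is not part of this patch. A rough sketch of
what such a standalone probe entails, including the extra set-up and
reset per probe that the review comments point out (names and the
exact status handshake are assumptions, not the series' code):

    static int vhost_vdpa_probe_desc_group(int device_fd, uint64_t features,
                                           unsigned vq_index,
                                           int64_t *desc_grpidx, Error **errp)
    {
        struct vhost_vring_state vring = { .index = vq_index };
        struct vhost_vring_state desc = { .index = vq_index };
        uint8_t status = 0;
        int ret = 0;

        *desc_grpidx = -1;  /* default: no dedicated descriptor group */

        if (ioctl(device_fd, VHOST_SET_FEATURES, &features) < 0) {
            error_setg_errno(errp, errno, "Cannot set features");
            return -errno;
        }
        /* Status handshake up to FEATURES_OK elided in this sketch;
         * vring groups are only queryable once features are acked. */

        if (ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &vring) < 0 ||
            ioctl(device_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &desc) < 0) {
            error_setg_errno(errp, errno, "Cannot query vring groups");
            ret = -errno;
        } else if (desc.num != vring.num) {
            *desc_grpidx = desc.num;  /* dedicated descriptor ASID group */
        }

        /* Reset so the regular initialization path starts clean. */
        if (ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status) < 0 && !ret) {
            ret = -errno;
        }
        return ret;
    }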