
[for,8.0,v8,06/12] vdpa: extract vhost_vdpa_svq_allocate_iova_tree

Message ID 20221124155158.2109884-7-eperezma@redhat.com (mailing list archive)
State New, archived
Series ASID support in vhost-vdpa net

Commit Message

Eugenio Perez Martin Nov. 24, 2022, 3:51 p.m. UTC
The IOVA tree can be allocated either when all virtqueues must be
shadowed or when vdpa-net detects it can shadow only CVQ.

Extract the allocation into its own function so we can reuse it.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 net/vhost-vdpa.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

Comments

Jason Wang Nov. 30, 2022, 6:43 a.m. UTC | #1
On Thu, Nov 24, 2022 at 11:52 PM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> The IOVA tree can be allocated either when all virtqueues must be
> shadowed or when vdpa-net detects it can shadow only CVQ.
>
> Extract the allocation into its own function so we can reuse it.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>  net/vhost-vdpa.c | 29 +++++++++++++++++------------
>  1 file changed, 17 insertions(+), 12 deletions(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 88e0eec5fa..9ee3bc4cd3 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -240,6 +240,22 @@ static NetClientInfo net_vhost_vdpa_info = {
>          .check_peer_type = vhost_vdpa_check_peer_type,
>  };
>
> +static int vhost_vdpa_get_iova_range(int fd,
> +                                     struct vhost_vdpa_iova_range *iova_range)
> +{
> +    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
> +
> +    return ret < 0 ? -errno : 0;
> +}

I don't get why this needs to be moved to net-specific code.

Thanks

> +
> +static VhostIOVATree *vhost_vdpa_svq_allocate_iova_tree(int vdpa_device_fd)
> +{
> +    struct vhost_vdpa_iova_range iova_range;
> +
> +    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
> +    return vhost_iova_tree_new(iova_range.first, iova_range.last);
> +}
> +
>  static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
>  {
>      VhostIOVATree *tree = v->iova_tree;
> @@ -587,14 +603,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>      return nc;
>  }
>
> -static int vhost_vdpa_get_iova_range(int fd,
> -                                     struct vhost_vdpa_iova_range *iova_range)
> -{
> -    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
> -
> -    return ret < 0 ? -errno : 0;
> -}
> -
>  static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
>  {
>      int ret = ioctl(fd, VHOST_GET_FEATURES, features);
> @@ -690,14 +698,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
>      }
>
>      if (opts->x_svq) {
> -        struct vhost_vdpa_iova_range iova_range;
> -
>          if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
>              goto err_svq;
>          }
>
> -        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
> -        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
> +        iova_tree = vhost_vdpa_svq_allocate_iova_tree(vdpa_device_fd);
>      }
>
>      ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
> --
> 2.31.1
>
Eugenio Perez Martin Nov. 30, 2022, 7:40 a.m. UTC | #2
On Wed, Nov 30, 2022 at 7:43 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Thu, Nov 24, 2022 at 11:52 PM Eugenio Pérez <eperezma@redhat.com> wrote:
> >
> > The IOVA tree can be allocated either when all virtqueues must be
> > shadowed or when vdpa-net detects it can shadow only CVQ.
> >
> > Extract the allocation into its own function so we can reuse it.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >  net/vhost-vdpa.c | 29 +++++++++++++++++------------
> >  1 file changed, 17 insertions(+), 12 deletions(-)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index 88e0eec5fa..9ee3bc4cd3 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -240,6 +240,22 @@ static NetClientInfo net_vhost_vdpa_info = {
> >          .check_peer_type = vhost_vdpa_check_peer_type,
> >  };
> >
> > +static int vhost_vdpa_get_iova_range(int fd,
> > +                                     struct vhost_vdpa_iova_range *iova_range)
> > +{
> > +    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
> > +
> > +    return ret < 0 ? -errno : 0;
> > +}
>
> I don't get why this needs to be moved to net-specific code.
>

It was already in net; this patch just extracted it into its own function.

It's done in net because the iova_tree must be the same for all
queue-pair vhost devices, so we need to allocate it before creating them.
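
For example, something along these lines (a simplified sketch of the
call flow; the real net_vhost_vdpa_init() takes a few more bookkeeping
arguments than shown here):

/* Allocate the tree once, before creating any vhost device */
iova_tree = vhost_vdpa_svq_allocate_iova_tree(vdpa_device_fd);

for (i = 0; i < queue_pairs; i++) {
    /* Every queue pair's vhost_vdpa receives the same tree */
    ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 2, true,
                                 opts->x_svq, iova_tree);
}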

Thanks!

> Thanks
>
> > +
> > +static VhostIOVATree *vhost_vdpa_svq_allocate_iova_tree(int vdpa_device_fd)
> > +{
> > +    struct vhost_vdpa_iova_range iova_range;
> > +
> > +    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
> > +    return vhost_iova_tree_new(iova_range.first, iova_range.last);
> > +}
> > +
> >  static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> >  {
> >      VhostIOVATree *tree = v->iova_tree;
> > @@ -587,14 +603,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >      return nc;
> >  }
> >
> > -static int vhost_vdpa_get_iova_range(int fd,
> > -                                     struct vhost_vdpa_iova_range *iova_range)
> > -{
> > -    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
> > -
> > -    return ret < 0 ? -errno : 0;
> > -}
> > -
> >  static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
> >  {
> >      int ret = ioctl(fd, VHOST_GET_FEATURES, features);
> > @@ -690,14 +698,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >      }
> >
> >      if (opts->x_svq) {
> > -        struct vhost_vdpa_iova_range iova_range;
> > -
> >          if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
> >              goto err_svq;
> >          }
> >
> > -        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
> > -        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
> > +        iova_tree = vhost_vdpa_svq_allocate_iova_tree(vdpa_device_fd);
> >      }
> >
> >      ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
> > --
> > 2.31.1
> >
>
Jason Wang Dec. 1, 2022, 8:45 a.m. UTC | #3
On Wed, Nov 30, 2022 at 3:40 PM Eugenio Perez Martin
<eperezma@redhat.com> wrote:
>
> On Wed, Nov 30, 2022 at 7:43 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Thu, Nov 24, 2022 at 11:52 PM Eugenio Pérez <eperezma@redhat.com> wrote:
> > >
> > > The IOVA tree can be allocated either when all virtqueues must be
> > > shadowed or when vdpa-net detects it can shadow only CVQ.
> > >
> > > Extract the allocation into its own function so we can reuse it.
> > >
> > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > ---
> > >  net/vhost-vdpa.c | 29 +++++++++++++++++------------
> > >  1 file changed, 17 insertions(+), 12 deletions(-)
> > >
> > > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > > index 88e0eec5fa..9ee3bc4cd3 100644
> > > --- a/net/vhost-vdpa.c
> > > +++ b/net/vhost-vdpa.c
> > > @@ -240,6 +240,22 @@ static NetClientInfo net_vhost_vdpa_info = {
> > >          .check_peer_type = vhost_vdpa_check_peer_type,
> > >  };
> > >
> > > +static int vhost_vdpa_get_iova_range(int fd,
> > > +                                     struct vhost_vdpa_iova_range *iova_range)
> > > +{
> > > +    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
> > > +
> > > +    return ret < 0 ? -errno : 0;
> > > +}
> >
> > I don't get why this needs to be moved to net-specific code.
> >
>
> It was already in net; this patch just extracted it into its own function.

Ok, there's a similar function in vhost-vdpa.c:

static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
{
    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
                              &v->iova_range);
    if (ret != 0) {
        v->iova_range.first = 0;
        v->iova_range.last = UINT64_MAX;
    }

    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
                                    v->iova_range.last);
}

I think we can reuse that.
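
E.g. something like the following rough sketch; note it takes the
vhost_vdpa rather than the raw fd, so it would need some adaptation to
be called from net:

vhost_vdpa_get_iova_range(&s->vhost_vdpa);
iova_tree = vhost_iova_tree_new(s->vhost_vdpa.iova_range.first,
                                s->vhost_vdpa.iova_range.last);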

Thanks

>
> It's done in net because the iova_tree must be the same for all
> queue-pair vhost devices, so we need to allocate it before creating them.
>
> Thanks!
>
> > Thanks
> >
> > > +
> > > +static VhostIOVATree *vhost_vdpa_svq_allocate_iova_tree(int vdpa_device_fd)
> > > +{
> > > +    struct vhost_vdpa_iova_range iova_range;
> > > +
> > > +    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
> > > +    return vhost_iova_tree_new(iova_range.first, iova_range.last);
> > > +}
> > > +
> > >  static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> > >  {
> > >      VhostIOVATree *tree = v->iova_tree;
> > > @@ -587,14 +603,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> > >      return nc;
> > >  }
> > >
> > > -static int vhost_vdpa_get_iova_range(int fd,
> > > -                                     struct vhost_vdpa_iova_range *iova_range)
> > > -{
> > > -    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
> > > -
> > > -    return ret < 0 ? -errno : 0;
> > > -}
> > > -
> > >  static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
> > >  {
> > >      int ret = ioctl(fd, VHOST_GET_FEATURES, features);
> > > @@ -690,14 +698,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> > >      }
> > >
> > >      if (opts->x_svq) {
> > > -        struct vhost_vdpa_iova_range iova_range;
> > > -
> > >          if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
> > >              goto err_svq;
> > >          }
> > >
> > > -        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
> > > -        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
> > > +        iova_tree = vhost_vdpa_svq_allocate_iova_tree(vdpa_device_fd);
> > >      }
> > >
> > >      ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
> > > --
> > > 2.31.1
> > >
> >
>
Eugenio Perez Martin Dec. 1, 2022, 9:49 a.m. UTC | #4
On Thu, Dec 1, 2022 at 9:45 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Wed, Nov 30, 2022 at 3:40 PM Eugenio Perez Martin
> <eperezma@redhat.com> wrote:
> >
> > On Wed, Nov 30, 2022 at 7:43 AM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Thu, Nov 24, 2022 at 11:52 PM Eugenio Pérez <eperezma@redhat.com> wrote:
> > > >
> > > > The IOVA tree can be allocated either when all virtqueues must be
> > > > shadowed or when vdpa-net detects it can shadow only CVQ.
> > > >
> > > > Extract the allocation into its own function so we can reuse it.
> > > >
> > > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > > ---
> > > >  net/vhost-vdpa.c | 29 +++++++++++++++++------------
> > > >  1 file changed, 17 insertions(+), 12 deletions(-)
> > > >
> > > > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > > > index 88e0eec5fa..9ee3bc4cd3 100644
> > > > --- a/net/vhost-vdpa.c
> > > > +++ b/net/vhost-vdpa.c
> > > > @@ -240,6 +240,22 @@ static NetClientInfo net_vhost_vdpa_info = {
> > > >          .check_peer_type = vhost_vdpa_check_peer_type,
> > > >  };
> > > >
> > > > +static int vhost_vdpa_get_iova_range(int fd,
> > > > +                                     struct vhost_vdpa_iova_range *iova_range)
> > > > +{
> > > > +    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
> > > > +
> > > > +    return ret < 0 ? -errno : 0;
> > > > +}
> > >
> > > I don't get why this needs to be moved to net-specific code.
> > >
> >
> > It was already in net; this patch just extracted it into its own function.
>
> Ok, there's a similar function in vhost-vdpa.c:
>
> static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
> {
>     int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
>                               &v->iova_range);
>     if (ret != 0) {
>         v->iova_range.first = 0;
>         v->iova_range.last = UINT64_MAX;
>     }
>
>     trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
>                                     v->iova_range.last);
> }
>
> I think we can reuse that.
>

That's right, but I'd do the reverse: I would store iova_min and
iova_max in VhostVDPAState and set them on vhost_vdpa at
net_vhost_vdpa_init. That way, we only issue one ioctl call at
initialization instead of (#vq pairs + cvq) calls each time the device
starts. I can send it as a new change if that looks good to you.
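
Something like this (just a sketch; exact names and placement TBD):

/* In net_init_vhost_vdpa(): issue the ioctl a single time */
struct vhost_vdpa_iova_range iova_range;
vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);

/* In net_vhost_vdpa_init(): hand the cached copy to each vhost_vdpa,
 * so the devices don't have to re-read it at every start */
s->vhost_vdpa.iova_range = iova_range;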

There are a few functions like that which we could reuse in net/.
Getting the features and the backend features are two other examples.
Even if we don't cache them, since device initialization mandates the
read anyway, we could still reduce code duplication that way.

However, they take a vhost_dev or a vhost_vdpa instead of the file
descriptor directly. Not a big deal, but it's an extra step.

What do you think?

Thanks!
Jason Wang Dec. 5, 2022, 4:24 a.m. UTC | #5
On Thu, Dec 1, 2022 at 5:50 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
>
> On Thu, Dec 1, 2022 at 9:45 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Wed, Nov 30, 2022 at 3:40 PM Eugenio Perez Martin
> > <eperezma@redhat.com> wrote:
> > >
> > > On Wed, Nov 30, 2022 at 7:43 AM Jason Wang <jasowang@redhat.com> wrote:
> > > >
> > > > On Thu, Nov 24, 2022 at 11:52 PM Eugenio Pérez <eperezma@redhat.com> wrote:
> > > > >
> > > > > The IOVA tree can be allocated either when all virtqueues must be
> > > > > shadowed or when vdpa-net detects it can shadow only CVQ.
> > > > >
> > > > > Extract the allocation into its own function so we can reuse it.
> > > > >
> > > > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > > > ---
> > > > >  net/vhost-vdpa.c | 29 +++++++++++++++++------------
> > > > >  1 file changed, 17 insertions(+), 12 deletions(-)
> > > > >
> > > > > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > > > > index 88e0eec5fa..9ee3bc4cd3 100644
> > > > > --- a/net/vhost-vdpa.c
> > > > > +++ b/net/vhost-vdpa.c
> > > > > @@ -240,6 +240,22 @@ static NetClientInfo net_vhost_vdpa_info = {
> > > > >          .check_peer_type = vhost_vdpa_check_peer_type,
> > > > >  };
> > > > >
> > > > > +static int vhost_vdpa_get_iova_range(int fd,
> > > > > +                                     struct vhost_vdpa_iova_range *iova_range)
> > > > > +{
> > > > > +    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
> > > > > +
> > > > > +    return ret < 0 ? -errno : 0;
> > > > > +}
> > > >
> > > > I don't get why this needs to be moved to net-specific code.
> > > >
> > >
> > > It was already in net; this patch just extracted it into its own function.
> >
> > Ok, there's a similar function in vhost-vdpa.c:
> >
> > static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
> > {
> >     int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
> >                               &v->iova_range);
> >     if (ret != 0) {
> >         v->iova_range.first = 0;
> >         v->iova_range.last = UINT64_MAX;
> >     }
> >
> >     trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
> >                                     v->iova_range.last);
> > }
> >
> > I think we can reuse that.
> >
>
> That's right, but I'd do the reverse: I would store iova_min and
> iova_max in VhostVDPAState and set them on vhost_vdpa at
> net_vhost_vdpa_init. That way, we only issue one ioctl call at
> initialization instead of (#vq pairs + cvq) calls each time the device
> starts. I can send it as a new change if that looks good to you.
>
> There are a few functions like that which we could reuse in net/.
> Getting the features and the backend features are two other examples.
> Even if we don't cache them, since device initialization mandates the
> read anyway, we could still reduce code duplication that way.
>
> However, they take a vhost_dev or a vhost_vdpa instead of the file
> descriptor directly. Not a big deal, but it's an extra step.
>
> What do you think?

I'm fine with this.

Thanks

>
> Thanks!
>

Patch

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 88e0eec5fa..9ee3bc4cd3 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -240,6 +240,22 @@ static NetClientInfo net_vhost_vdpa_info = {
         .check_peer_type = vhost_vdpa_check_peer_type,
 };
 
+static int vhost_vdpa_get_iova_range(int fd,
+                                     struct vhost_vdpa_iova_range *iova_range)
+{
+    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
+
+    return ret < 0 ? -errno : 0;
+}
+
+static VhostIOVATree *vhost_vdpa_svq_allocate_iova_tree(int vdpa_device_fd)
+{
+    struct vhost_vdpa_iova_range iova_range;
+
+    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
+    return vhost_iova_tree_new(iova_range.first, iova_range.last);
+}
+
 static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
 {
     VhostIOVATree *tree = v->iova_tree;
@@ -587,14 +603,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     return nc;
 }
 
-static int vhost_vdpa_get_iova_range(int fd,
-                                     struct vhost_vdpa_iova_range *iova_range)
-{
-    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
-
-    return ret < 0 ? -errno : 0;
-}
-
 static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
 {
     int ret = ioctl(fd, VHOST_GET_FEATURES, features);
@@ -690,14 +698,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     }
 
     if (opts->x_svq) {
-        struct vhost_vdpa_iova_range iova_range;
-
         if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
             goto err_svq;
         }
 
-        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
-        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
+        iova_tree = vhost_vdpa_svq_allocate_iova_tree(vdpa_device_fd);
     }
 
     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);