
[18/18] vhost-vdpa: multiqueue support

Message ID 20210621041650.5826-19-jasowang@redhat.com (mailing list archive)
State New, archived
Series vhost-vDPA multiqueue

Commit Message

Jason Wang June 21, 2021, 4:16 a.m. UTC
This patch implements multiqueue support for vhost-vdpa. This is done
simply by reading the number of queue pairs from the config space and
initializing the datapath and control path net clients.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/net/virtio-net.c |  3 +-
 net/vhost-vdpa.c    | 98 ++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 91 insertions(+), 10 deletions(-)
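
For orientation, the net client layout this produces for a device that
reports max_virtqueue_pairs = N and offers VIRTIO_NET_F_CTRL_VQ, as an
inferred sketch (the vq mapping is deduced from the qp_index/nvqs
arguments in the patch, not stated in it):

/*
 * Illustrative layout only:
 *
 *   ncs[0]    datapath client, qp_index 0,   nvqs 2  (vhost vqs 0, 1)
 *   ncs[1]    datapath client, qp_index 1,   nvqs 2  (vhost vqs 2, 3)
 *   ...
 *   ncs[N-1]  datapath client, qp_index N-1, nvqs 2  (vhost vqs 2N-2, 2N-1)
 *   nc        control client,  qp_index N,   nvqs 1  (vhost vq 2N)
 */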

Comments

Eugenio Perez Martin July 1, 2021, 6:51 a.m. UTC | #1
On Mon, Jun 21, 2021 at 6:18 AM Jason Wang <jasowang@redhat.com> wrote:
>
> This patch implements multiqueue support for vhost-vdpa. This is done
> simply by reading the number of queue pairs from the config space and
> initializing the datapath and control path net clients.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  hw/net/virtio-net.c |  3 +-
>  net/vhost-vdpa.c    | 98 ++++++++++++++++++++++++++++++++++++++++-----
>  2 files changed, 91 insertions(+), 10 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 5074b521cf..2c2ed98c0b 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -3370,7 +3370,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>
>      n->max_ncs = MAX(n->nic_conf.peers.queues, 1);
>
> -    /* Figure out the datapath queue pairs since the bakcend could
> +    /*
> +     * Figure out the datapath queue pairs since the bakcend could

If we are going to modify the comment we could s/bakcend/backend/.

>       * provide control queue via peers as well.
>       */
>      if (n->nic_conf.peers.queues) {
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index cc11b2ec40..048344b4bc 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -18,6 +18,7 @@
>  #include "qemu/error-report.h"
>  #include "qemu/option.h"
>  #include "qapi/error.h"
> +#include <linux/vhost.h>
>  #include <sys/ioctl.h>
>  #include <err.h>
>  #include "standard-headers/linux/virtio_net.h"
> @@ -52,6 +53,8 @@ const int vdpa_feature_bits[] = {
>      VIRTIO_NET_F_HOST_UFO,
>      VIRTIO_NET_F_MRG_RXBUF,
>      VIRTIO_NET_F_MTU,
> +    VIRTIO_NET_F_MQ,
> +    VIRTIO_NET_F_CTRL_VQ,


Hi!

I'm not sure if it's QEMU that must control it, but I cannot use the
vdpa_sim of Linux 5.13 (i.e., with no control vq patches) with this
series applied:

[    3.967421] virtio_net virtio0: device advertises feature
VIRTIO_NET_F_CTRL_RX but not VIRTIO_NET_F_CTRL_VQ
[    3.968613] virtio_net: probe of virtio0 failed with error -22

Did you mention it somewhere else and I missed it? Or is it actually
a bug in the device? In the second case, I think we should still work
around it in QEMU, because the old vdpasim_net with no
VIRTIO_NET_F_CTRL_VQ still works fine without this patch.

Thanks!
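
For context, the probe failure above comes from the guest driver's
feature-dependency validation. A minimal user-space restatement of
that check (a sketch modeled on the kernel's virtio_net.c, not the
actual driver code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/virtio_net.h>

/*
 * Return 0 if the offered feature set is coherent, -22 (-EINVAL)
 * otherwise -- the same errno seen in the probe log above. The
 * dependency list mirrors what the guest driver enforces.
 */
static int validate_ctrl_vq_deps(uint64_t features)
{
    static const unsigned int deps[] = {
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
        VIRTIO_NET_F_CTRL_MAC_ADDR,
    };
    size_t i;

    if (features & (1ULL << VIRTIO_NET_F_CTRL_VQ)) {
        return 0;
    }
    for (i = 0; i < sizeof(deps) / sizeof(deps[0]); i++) {
        if (features & (1ULL << deps[i])) {
            fprintf(stderr, "feature bit %u needs VIRTIO_NET_F_CTRL_VQ\n",
                    deps[i]);
            return -22;
        }
    }
    return 0;
}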

Jason Wang July 1, 2021, 8:15 a.m. UTC | #2
On 2021/7/1 2:51 PM, Eugenio Perez Martin wrote:
> On Mon, Jun 21, 2021 at 6:18 AM Jason Wang <jasowang@redhat.com> wrote:
>> This patch implements multiqueue support for vhost-vdpa. This is done
>> simply by reading the number of queue pairs from the config space and
>> initializing the datapath and control path net clients.
>>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>>   hw/net/virtio-net.c |  3 +-
>>   net/vhost-vdpa.c    | 98 ++++++++++++++++++++++++++++++++++++++++-----
>>   2 files changed, 91 insertions(+), 10 deletions(-)
>>
>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>> index 5074b521cf..2c2ed98c0b 100644
>> --- a/hw/net/virtio-net.c
>> +++ b/hw/net/virtio-net.c
>> @@ -3370,7 +3370,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>>
>>       n->max_ncs = MAX(n->nic_conf.peers.queues, 1);
>>
>> -    /* Figure out the datapath queue pairs since the bakcend could
>> +    /*
>> +     * Figure out the datapath queue pairs since the bakcend could
> If we are going to modify the comment we could s/bakcend/backend/.


Will fix.


>
>>        * provide control queue via peers as well.
>>        */
>>       if (n->nic_conf.peers.queues) {
>> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
>> index cc11b2ec40..048344b4bc 100644
>> --- a/net/vhost-vdpa.c
>> +++ b/net/vhost-vdpa.c
>> @@ -18,6 +18,7 @@
>>   #include "qemu/error-report.h"
>>   #include "qemu/option.h"
>>   #include "qapi/error.h"
>> +#include <linux/vhost.h>
>>   #include <sys/ioctl.h>
>>   #include <err.h>
>>   #include "standard-headers/linux/virtio_net.h"
>> @@ -52,6 +53,8 @@ const int vdpa_feature_bits[] = {
>>       VIRTIO_NET_F_HOST_UFO,
>>       VIRTIO_NET_F_MRG_RXBUF,
>>       VIRTIO_NET_F_MTU,
>> +    VIRTIO_NET_F_MQ,
>> +    VIRTIO_NET_F_CTRL_VQ,
>
> Hi!
>
> I'm not sure if it's QEMU that must control it, but I cannot use the
> vdpa_sim of Linux 5.13 (i.e., with no control vq patches) with this
> series applied:
>
> [    3.967421] virtio_net virtio0: device advertises feature
> VIRTIO_NET_F_CTRL_RX but not VIRTIO_NET_F_CTRL_VQ
> [    3.968613] virtio_net: probe of virtio0 failed with error -22


Interesting, looks like a bug somewhere.

We never advertise CTRL_RX in the case of the simulator.


>
> Did you mention it somewhere else and I missed it? Or is it actually
> a bug in the device? In the second case, I think we should still work
> around it in QEMU, because the old vdpasim_net with no
> VIRTIO_NET_F_CTRL_VQ still works fine without this patch.


Should be a bug, will have a look.

Thanks


Jason Wang July 6, 2021, 7:46 a.m. UTC | #3
On 2021/7/1 2:51 PM, Eugenio Perez Martin wrote:
>> @@ -52,6 +53,8 @@ const int vdpa_feature_bits[] = {
>>       VIRTIO_NET_F_HOST_UFO,
>>       VIRTIO_NET_F_MRG_RXBUF,
>>       VIRTIO_NET_F_MTU,
>> +    VIRTIO_NET_F_MQ,
>> +    VIRTIO_NET_F_CTRL_VQ,
> Hi!
>
> I'm not sure if it's QEMU that must control it, but I cannot use the
> vdpa_sim of Linux 5.13 (i.e., with no control vq patches) with this
> series applied:
>
> [    3.967421] virtio_net virtio0: device advertises feature
> VIRTIO_NET_F_CTRL_RX but not VIRTIO_NET_F_CTRL_VQ
> [    3.968613] virtio_net: probe of virtio0 failed with error -22
>
> Did you mention it somewhere else and I missed it? Or is it actually
> a bug in the device? In the second case, I think we should still work
> around it in QEMU, because the old vdpasim_net with no
> VIRTIO_NET_F_CTRL_VQ still works fine without this patch.
>
> Thanks!


So the problem is that we need to validate not only MQ but also all
the features that depend on the CTRL VQ here (rx filters, mac, rss,
announce, etc.).

I will fix this in the next version.

Thanks
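
For reference, one possible shape of such a fix, as an illustrative
sketch only (the helper name is made up here, and this is not the
change that eventually landed): mask out every feature that depends on
the control virtqueue when the device does not offer
VIRTIO_NET_F_CTRL_VQ.

/* Sketch only; not the actual follow-up patch. */
static uint64_t vhost_vdpa_drop_ctrl_vq_deps(uint64_t features)
{
    if (!(features & (1ULL << VIRTIO_NET_F_CTRL_VQ))) {
        features &= ~((1ULL << VIRTIO_NET_F_CTRL_RX) |
                      (1ULL << VIRTIO_NET_F_CTRL_VLAN) |
                      (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
                      (1ULL << VIRTIO_NET_F_MQ) |
                      (1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR) |
                      (1ULL << VIRTIO_NET_F_RSS));
    }
    return features;
}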

Patch

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 5074b521cf..2c2ed98c0b 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3370,7 +3370,8 @@  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
 
     n->max_ncs = MAX(n->nic_conf.peers.queues, 1);
 
-    /* Figure out the datapath queue pairs since the bakcend could
+    /*
+     * Figure out the datapath queue pairs since the bakcend could
      * provide control queue via peers as well.
      */
     if (n->nic_conf.peers.queues) {
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index cc11b2ec40..048344b4bc 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -18,6 +18,7 @@ 
 #include "qemu/error-report.h"
 #include "qemu/option.h"
 #include "qapi/error.h"
+#include <linux/vhost.h>
 #include <sys/ioctl.h>
 #include <err.h>
 #include "standard-headers/linux/virtio_net.h"
@@ -52,6 +53,8 @@  const int vdpa_feature_bits[] = {
     VIRTIO_NET_F_HOST_UFO,
     VIRTIO_NET_F_MRG_RXBUF,
     VIRTIO_NET_F_MTU,
+    VIRTIO_NET_F_MQ,
+    VIRTIO_NET_F_CTRL_VQ,
     VIRTIO_F_IOMMU_PLATFORM,
     VIRTIO_F_RING_PACKED,
     VIRTIO_NET_F_RSS,
@@ -82,7 +85,8 @@  static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
     return ret;
 }
 
-static int vhost_vdpa_add(NetClientState *ncs, void *be)
+static int vhost_vdpa_add(NetClientState *ncs, void *be, int qp_index,
+                          int nvqs)
 {
     VhostNetOptions options;
     struct vhost_net *net = NULL;
@@ -95,7 +99,7 @@  static int vhost_vdpa_add(NetClientState *ncs, void *be)
     options.net_backend = ncs;
     options.opaque      = be;
     options.busyloop_timeout = 0;
-    options.nvqs = 2;
+    options.nvqs = nvqs;
 
     net = vhost_net_init(&options);
     if (!net) {
@@ -159,18 +163,28 @@  static NetClientInfo net_vhost_vdpa_info = {
 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                            const char *device,
                                            const char *name,
-                                           int vdpa_device_fd)
+                                           int vdpa_device_fd,
+                                           int qp_index,
+                                           int nvqs,
+                                           bool is_datapath)
 {
     NetClientState *nc = NULL;
     VhostVDPAState *s;
     int ret = 0;
     assert(name);
-    nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name);
+    if (is_datapath) {
+        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
+                                 name);
+    } else {
+        nc = qemu_new_net_control_client(&net_vhost_vdpa_info, peer,
+                                         device, name);
+    }
     snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA);
     s = DO_UPCAST(VhostVDPAState, nc, nc);
 
     s->vhost_vdpa.device_fd = vdpa_device_fd;
-    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa);
+    s->vhost_vdpa.index = qp_index;
+    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, qp_index, nvqs);
     if (ret) {
         qemu_del_net_client(nc);
         return NULL;
@@ -196,12 +210,52 @@  static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
     return 0;
 }
 
+static int vhost_vdpa_get_max_qps(int fd, int *has_cvq, Error **errp)
+{
+    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
+    struct vhost_vdpa_config *config;
+    __virtio16 *max_qps;
+    uint64_t features;
+    int ret;
+
+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
+    if (ret) {
+        error_setg(errp, "Fail to query features from vhost-vDPA device");
+        return ret;
+    }
+
+    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
+        *has_cvq = 1;
+    } else {
+        *has_cvq = 0;
+    }
+
+    if (features & (1 << VIRTIO_NET_F_MQ)) {
+        config = g_malloc0(config_size + sizeof(*max_qps));
+        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
+        config->len = sizeof(*max_qps);
+
+        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
+        if (ret) {
+            error_setg(errp, "Fail to get config from vhost-vDPA device");
+            return -ret;
+        }
+
+        max_qps = (__virtio16 *)&config->buf;
+
+        return lduw_le_p(max_qps);
+    }
+
+    return 1;
+}
+
 int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                         NetClientState *peer, Error **errp)
 {
     const NetdevVhostVDPAOptions *opts;
     int vdpa_device_fd;
-    NetClientState *nc;
+    NetClientState **ncs, *nc;
+    int qps, i, has_cvq = 0;
 
     assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
     opts = &netdev->u.vhost_vdpa;
@@ -216,11 +270,37 @@  int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
         return -errno;
     }
 
-    nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd);
-    if (!nc) {
+    qps = vhost_vdpa_get_max_qps(vdpa_device_fd, &has_cvq, errp);
+    if (qps < 0) {
         qemu_close(vdpa_device_fd);
-        return -1;
+        return qps;
+    }
+
+    ncs = g_malloc0(sizeof(*ncs) * qps);
+
+    for (i = 0; i < qps; i++) {
+        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
+                                     vdpa_device_fd, i, 2, true);
+        if (!ncs[i])
+            goto err;
     }
 
+    if (has_cvq) {
+        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
+                                 vdpa_device_fd, i, 1, false);
+        if (!nc)
+            goto err;
+    }
+
+    g_free(ncs);
     return 0;
+
+err:
+    if (i) {
+        qemu_del_net_client(ncs[0]);
+    }
+    qemu_close(vdpa_device_fd);
+    g_free(ncs);
+
+    return -1;
 }
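
As a companion to vhost_vdpa_get_max_qps() above, the same ioctls can
be exercised from a stand-alone user-space probe. A minimal, untested
sketch (assuming a device node such as /dev/vhost-vdpa-0; error
handling kept short):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <fcntl.h>
#include <unistd.h>
#include <endian.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/dev/vhost-vdpa-0";
    uint64_t features;
    int fd = open(path, O_RDWR);

    if (fd < 0 || ioctl(fd, VHOST_GET_FEATURES, &features)) {
        perror(path);
        return 1;
    }
    printf("CTRL_VQ: %s\n",
           features & (1ULL << VIRTIO_NET_F_CTRL_VQ) ? "yes" : "no");
    if (features & (1ULL << VIRTIO_NET_F_MQ)) {
        /* Read the little-endian max_virtqueue_pairs config field. */
        struct vhost_vdpa_config *cfg =
            calloc(1, sizeof(*cfg) + sizeof(uint16_t));
        uint16_t qps;

        cfg->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        cfg->len = sizeof(uint16_t);
        if (ioctl(fd, VHOST_VDPA_GET_CONFIG, cfg) == 0) {
            memcpy(&qps, cfg->buf, sizeof(qps));
            printf("max_virtqueue_pairs: %u\n", le16toh(qps));
        }
        free(cfg);
    } else {
        printf("max_virtqueue_pairs: 1 (no MQ)\n");
    }
    close(fd);
    return 0;
}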