diff mbox series

[v3,2/8] vdpa: Use iovec for vhost_vdpa_net_cvq_add()

Message ID b1d473772ec4bcb254ab0d12430c9b1efe758606.1689748694.git.yin31149@gmail.com (mailing list archive)
State New, archived
Headers show
Series [v3,1/8] vhost: Add argument to vhost_svq_poll() | expand

Commit Message

Hawkins Jiawei July 19, 2023, 7:53 a.m. UTC
Next patches in this series will no longer perform an
immediate poll and check of the device's used buffers
for each CVQ state load command. Consequently, there
will be multiple pending buffers in the shadow VirtQueue,
making it a must for every control command to have its
own buffer.

To achieve this, this patch refactors vhost_vdpa_net_cvq_add()
to accept `struct iovec`, which eliminates the coupling of
control commands to `s->cvq_cmd_out_buffer` and `s->status`,
allowing them to use their own buffer.

Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
---
 net/vhost-vdpa.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

Comments

Eugenio Perez Martin Aug. 18, 2023, 3:23 p.m. UTC | #1
On Wed, Jul 19, 2023 at 9:54 AM Hawkins Jiawei <yin31149@gmail.com> wrote:
>
> Next patches in this series will no longer perform an
> immediate poll and check of the device's used buffers
> for each CVQ state load command. Consequently, there
> will be multiple pending buffers in the shadow VirtQueue,
> making it a must for every control command to have its
> own buffer.
>
> To achieve this, this patch refactor vhost_vdpa_net_cvq_add()
> to accept `struct iovec`, which eliminates the coupling of
> control commands to `s->cvq_cmd_out_buffer` and `s->status`,
> allowing them to use their own buffer.
>
> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
> ---
>  net/vhost-vdpa.c | 38 ++++++++++++++++++++------------------
>  1 file changed, 20 insertions(+), 18 deletions(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index d1dd140bf6..6b16c8ece0 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -596,22 +596,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
>      vhost_vdpa_net_client_stop(nc);
>  }
>
> -static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> -                                      size_t in_len)
> +static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
> +                                      struct iovec *out_sg, size_t out_num,
> +                                      struct iovec *in_sg, size_t in_num)
>  {
> -    /* Buffers for the device */
> -    const struct iovec out = {
> -        .iov_base = s->cvq_cmd_out_buffer,
> -        .iov_len = out_len,
> -    };
> -    const struct iovec in = {
> -        .iov_base = s->status,
> -        .iov_len = sizeof(virtio_net_ctrl_ack),
> -    };
>      VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
>      int r;
>
> -    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
> +    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
>      if (unlikely(r != 0)) {
>          if (unlikely(r == -ENOSPC)) {
>              qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
> @@ -637,6 +629,15 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
>          .cmd = cmd,
>      };
>      size_t data_size = iov_size(data_sg, data_num);
> +    /* Buffers for the device */
> +    struct iovec out = {
> +        .iov_base = s->cvq_cmd_out_buffer,
> +        .iov_len = sizeof(ctrl) + data_size,
> +    };
> +    struct iovec in = {
> +        .iov_base = s->status,
> +        .iov_len = sizeof(*s->status),
> +    };
>
>      assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
>
> @@ -647,8 +648,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
>      iov_to_buf(data_sg, data_num, 0,
>                 s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
>
> -    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
> -                                  sizeof(virtio_net_ctrl_ack));
> +    return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
>  }
>
>  static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
> @@ -1222,9 +1222,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>      struct iovec out = {
>          .iov_base = s->cvq_cmd_out_buffer,
>      };
> -    /* in buffer used for device model */
> -    const struct iovec in = {
> -        .iov_base = &status,
> +    struct iovec in = {

What if instead of reusing "in" we declare a new struct iovec in the
condition that calls vhost_vdpa_net_cvq_add? Something along the lines
of "device_in".

I'm also ok with this code, but splitting them would reduce the
possibility of sending the wrong one to the device / virtio device
model by mistake.

Thanks!

>          .iov_len = sizeof(status),
>      };
>      ssize_t dev_written = -EINVAL;
> @@ -1232,6 +1230,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>      out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
>                               s->cvq_cmd_out_buffer,
>                               vhost_vdpa_net_cvq_cmd_page_len());
> +    /* In buffer used for the vdpa device */
> +    in.iov_base = s->status;
>
>      ctrl = s->cvq_cmd_out_buffer;
>      if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
> @@ -1260,7 +1260,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>              goto out;
>          }
>      } else {
> -        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
> +        dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
>          if (unlikely(dev_written < 0)) {
>              goto out;
>          }
> @@ -1276,6 +1276,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>      }
>
>      status = VIRTIO_NET_ERR;
> +    /* In buffer used for the device model */
> +    in.iov_base = &status;
>      virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
>      if (status != VIRTIO_NET_OK) {
>          error_report("Bad CVQ processing in model");
> --
> 2.25.1
>
Hawkins Jiawei Aug. 20, 2023, 2:33 a.m. UTC | #2
On 2023/8/18 23:23, Eugenio Perez Martin wrote:
> On Wed, Jul 19, 2023 at 9:54 AM Hawkins Jiawei <yin31149@gmail.com> wrote:
>>
>> Next patches in this series will no longer perform an
>> immediate poll and check of the device's used buffers
>> for each CVQ state load command. Consequently, there
>> will be multiple pending buffers in the shadow VirtQueue,
>> making it a must for every control command to have its
>> own buffer.
>>
>> To achieve this, this patch refactor vhost_vdpa_net_cvq_add()
>> to accept `struct iovec`, which eliminates the coupling of
>> control commands to `s->cvq_cmd_out_buffer` and `s->status`,
>> allowing them to use their own buffer.
>>
>> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
>> ---
>>   net/vhost-vdpa.c | 38 ++++++++++++++++++++------------------
>>   1 file changed, 20 insertions(+), 18 deletions(-)
>>
>> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
>> index d1dd140bf6..6b16c8ece0 100644
>> --- a/net/vhost-vdpa.c
>> +++ b/net/vhost-vdpa.c
>> @@ -596,22 +596,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
>>       vhost_vdpa_net_client_stop(nc);
>>   }
>>
>> -static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
>> -                                      size_t in_len)
>> +static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
>> +                                      struct iovec *out_sg, size_t out_num,
>> +                                      struct iovec *in_sg, size_t in_num)
>>   {
>> -    /* Buffers for the device */
>> -    const struct iovec out = {
>> -        .iov_base = s->cvq_cmd_out_buffer,
>> -        .iov_len = out_len,
>> -    };
>> -    const struct iovec in = {
>> -        .iov_base = s->status,
>> -        .iov_len = sizeof(virtio_net_ctrl_ack),
>> -    };
>>       VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
>>       int r;
>>
>> -    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
>> +    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
>>       if (unlikely(r != 0)) {
>>           if (unlikely(r == -ENOSPC)) {
>>               qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
>> @@ -637,6 +629,15 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
>>           .cmd = cmd,
>>       };
>>       size_t data_size = iov_size(data_sg, data_num);
>> +    /* Buffers for the device */
>> +    struct iovec out = {
>> +        .iov_base = s->cvq_cmd_out_buffer,
>> +        .iov_len = sizeof(ctrl) + data_size,
>> +    };
>> +    struct iovec in = {
>> +        .iov_base = s->status,
>> +        .iov_len = sizeof(*s->status),
>> +    };
>>
>>       assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
>>
>> @@ -647,8 +648,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
>>       iov_to_buf(data_sg, data_num, 0,
>>                  s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
>>
>> -    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
>> -                                  sizeof(virtio_net_ctrl_ack));
>> +    return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
>>   }
>>
>>   static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
>> @@ -1222,9 +1222,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>>       struct iovec out = {
>>           .iov_base = s->cvq_cmd_out_buffer,
>>       };
>> -    /* in buffer used for device model */
>> -    const struct iovec in = {
>> -        .iov_base = &status,
>> +    struct iovec in = {
>
> What if instead of reusing "in" we declare a new struct iovec in the
> condition that calls vhost_vdpa_net_cvq_add? Something in the line of
> "device_in".
>
> I'm also ok with this code, but splitting them would reduce the
> possibility of sending the wrong one to the device / virtio device
> model by mistake.

Hi Eugenio,

Ok, I will refactor this part of the code according to your suggestion
in the v4 patch.

Thanks!


>
> Thanks!
>
>>           .iov_len = sizeof(status),
>>       };
>>       ssize_t dev_written = -EINVAL;
>> @@ -1232,6 +1230,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>>       out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
>>                                s->cvq_cmd_out_buffer,
>>                                vhost_vdpa_net_cvq_cmd_page_len());
>> +    /* In buffer used for the vdpa device */
>> +    in.iov_base = s->status;
>>
>>       ctrl = s->cvq_cmd_out_buffer;
>>       if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
>> @@ -1260,7 +1260,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>>               goto out;
>>           }
>>       } else {
>> -        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
>> +        dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
>>           if (unlikely(dev_written < 0)) {
>>               goto out;
>>           }
>> @@ -1276,6 +1276,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>>       }
>>
>>       status = VIRTIO_NET_ERR;
>> +    /* In buffer used for the device model */
>> +    in.iov_base = &status;
>>       virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
>>       if (status != VIRTIO_NET_OK) {
>>           error_report("Bad CVQ processing in model");
>> --
>> 2.25.1
>>
>
diff mbox series

Patch

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index d1dd140bf6..6b16c8ece0 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -596,22 +596,14 @@  static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
     vhost_vdpa_net_client_stop(nc);
 }
 
-static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
-                                      size_t in_len)
+static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
+                                      struct iovec *out_sg, size_t out_num,
+                                      struct iovec *in_sg, size_t in_num)
 {
-    /* Buffers for the device */
-    const struct iovec out = {
-        .iov_base = s->cvq_cmd_out_buffer,
-        .iov_len = out_len,
-    };
-    const struct iovec in = {
-        .iov_base = s->status,
-        .iov_len = sizeof(virtio_net_ctrl_ack),
-    };
     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
     int r;
 
-    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
+    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
     if (unlikely(r != 0)) {
         if (unlikely(r == -ENOSPC)) {
             qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
@@ -637,6 +629,15 @@  static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
         .cmd = cmd,
     };
     size_t data_size = iov_size(data_sg, data_num);
+    /* Buffers for the device */
+    struct iovec out = {
+        .iov_base = s->cvq_cmd_out_buffer,
+        .iov_len = sizeof(ctrl) + data_size,
+    };
+    struct iovec in = {
+        .iov_base = s->status,
+        .iov_len = sizeof(*s->status),
+    };
 
     assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
 
@@ -647,8 +648,7 @@  static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
     iov_to_buf(data_sg, data_num, 0,
                s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
 
-    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
-                                  sizeof(virtio_net_ctrl_ack));
+    return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
 }
 
 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
@@ -1222,9 +1222,7 @@  static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
     struct iovec out = {
         .iov_base = s->cvq_cmd_out_buffer,
     };
-    /* in buffer used for device model */
-    const struct iovec in = {
-        .iov_base = &status,
+    struct iovec in = {
         .iov_len = sizeof(status),
     };
     ssize_t dev_written = -EINVAL;
@@ -1232,6 +1230,8 @@  static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
     out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                              s->cvq_cmd_out_buffer,
                              vhost_vdpa_net_cvq_cmd_page_len());
+    /* In buffer used for the vdpa device */
+    in.iov_base = s->status;
 
     ctrl = s->cvq_cmd_out_buffer;
     if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
@@ -1260,7 +1260,7 @@  static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
             goto out;
         }
     } else {
-        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
+        dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
         if (unlikely(dev_written < 0)) {
             goto out;
         }
@@ -1276,6 +1276,8 @@  static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
     }
 
     status = VIRTIO_NET_ERR;
+    /* In buffer used for the device model */
+    in.iov_base = &status;
     virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
     if (status != VIRTIO_NET_OK) {
         error_report("Bad CVQ processing in model");