[v2] vdpa: Fix endian bugs in shadow virtqueue

Message ID: 20250212164923.1971538-1-kshk@linux.ibm.com (mailing list archive)
State: New
Series: [v2] vdpa: Fix endian bugs in shadow virtqueue

Commit Message

Konstantin Shkolnyy Feb. 12, 2025, 4:49 p.m. UTC
VDPA didn't work on a big-endian machine due to missing/incorrect
CPU<->LE data format conversions.

Signed-off-by: Konstantin Shkolnyy <kshk@linux.ibm.com>
---
Changes in v2: Change desc_next[] from LE format to "CPU".

 hw/virtio/vhost-shadow-virtqueue.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
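
Background for the fix: the virtio 1.0 (modern) ring layout is little-endian
on the wire, so on a big-endian host every multi-byte ring field must pass
through cpu_to_le*()/le*_to_cpu(). A minimal portable sketch of what the
16-bit helpers do (QEMU's real ones live in include/qemu/bswap.h):

    #include <stdint.h>

    /* Sketch only: identity on a little-endian host, byte swap on a
     * big-endian one. cpu_to_le16() performs the same operation, since
     * a 16-bit byte swap is its own inverse. */
    static uint16_t sketch_le16_to_cpu(uint16_t v)
    {
        const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };
        return probe.b[0] ? v : (uint16_t)((v >> 8) | (v << 8));
    }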

Comments

Philippe Mathieu-Daudé Feb. 12, 2025, 6:11 p.m. UTC | #1
On 12/2/25 17:49, Konstantin Shkolnyy wrote:
> VDPA didn't work on a big-endian machine due to missing/incorrect
> CPU<->LE data format conversions.
> 
> Signed-off-by: Konstantin Shkolnyy <kshk@linux.ibm.com>
> ---
> Changes in v2: Change desc_next[] from LE format to "CPU".
> 
>   hw/virtio/vhost-shadow-virtqueue.c | 18 ++++++++++--------
>   1 file changed, 10 insertions(+), 8 deletions(-)


> @@ -228,10 +228,12 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
>       smp_mb();
>   
>       if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
> -        uint16_t avail_event = *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]);
> +        uint16_t avail_event = le16_to_cpu(
> +                *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]));

Nitpicking, sometimes using the ld/st API is cleaner (here lduw_le_p).

>           needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx, svq->shadow_avail_idx - 1);
>       } else {
> -        needs_kick = !(svq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
> +        needs_kick =
> +                !(svq->vring.used->flags & cpu_to_le16(VRING_USED_F_NO_NOTIFY));
>       }
Eugenio Perez Martin Feb. 13, 2025, 6:43 a.m. UTC | #2
On Wed, Feb 12, 2025 at 7:11 PM Philippe Mathieu-Daudé
<philmd@linaro.org> wrote:
>
> On 12/2/25 17:49, Konstantin Shkolnyy wrote:
> > VDPA didn't work on a big-endian machine due to missing/incorrect
> > CPU<->LE data format conversions.
> >
> > Signed-off-by: Konstantin Shkolnyy <kshk@linux.ibm.com>
> > ---
> > Changes in v2: Change desc_next[] from LE format to "CPU".
> >
> >   hw/virtio/vhost-shadow-virtqueue.c | 18 ++++++++++--------
> >   1 file changed, 10 insertions(+), 8 deletions(-)
>
>
> > @@ -228,10 +228,12 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
> >       smp_mb();
> >
> >       if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
> > -        uint16_t avail_event = *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]);
> > +        uint16_t avail_event = le16_to_cpu(
> > +                *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]));
>
> Nitpicking, sometimes using the ld/st API is cleaner (here lduw_le_p).
>

I'm not sure if it is right in SVQ, as it is not accessing guest
memory but QEMU memory that has been mapped to a device. But if you
think it is still a valid use case for the ld*/st* family, I'd be
totally OK with that usage.

> >           needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx, svq->shadow_avail_idx - 1);
> >       } else {
> > -        needs_kick = !(svq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
> > +        needs_kick =
> > +                !(svq->vring.used->flags & cpu_to_le16(VRING_USED_F_NO_NOTIFY));
> >       }
>
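
For context on the EVENT_IDX branch quoted above: vring_need_event() is the
standard helper from the virtio spec (also in linux/virtio_ring.h), which
is why avail_event has to be converted to CPU order before the comparison:

    #include <stdint.h>

    /* Kick only if event_idx (the index at which the device asked to be
     * notified) falls inside the batch published since the last kick.
     * The uint16_t arithmetic keeps the window test correct across
     * index wrap-around. */
    static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx,
                                       uint16_t old)
    {
        return (uint16_t)(new_idx - event_idx - 1) <
               (uint16_t)(new_idx - old);
    }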
Eugenio Perez Martin Feb. 13, 2025, 6:49 a.m. UTC | #3
On Wed, Feb 12, 2025 at 5:49 PM Konstantin Shkolnyy <kshk@linux.ibm.com> wrote:
>
> VDPA didn't work on a big-endian machine due to missing/incorrect
> CPU<->LE data format conversions.
>

Fixes: 10857ec0ad ("vhost: Add VhostShadowVirtqueue")

> Signed-off-by: Konstantin Shkolnyy <kshk@linux.ibm.com>

Acked-by: Eugenio Pérez <eperezma@redhat.com>

Thanks!

> [...]
Philippe Mathieu-Daudé Feb. 13, 2025, 12:24 p.m. UTC | #4
On 13/2/25 07:43, Eugenio Perez Martin wrote:
> On Wed, Feb 12, 2025 at 7:11 PM Philippe Mathieu-Daudé
> <philmd@linaro.org> wrote:
>>
>> On 12/2/25 17:49, Konstantin Shkolnyy wrote:
>>> VDPA didn't work on a big-endian machine due to missing/incorrect
>>> CPU<->LE data format conversions.
>>>
>>> Signed-off-by: Konstantin Shkolnyy <kshk@linux.ibm.com>
>>> ---
>>> Changes in v2: Change desc_next[] from LE format to "CPU".
>>>
>>>    hw/virtio/vhost-shadow-virtqueue.c | 18 ++++++++++--------
>>>    1 file changed, 10 insertions(+), 8 deletions(-)
>>
>>
>>> @@ -228,10 +228,12 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
>>>        smp_mb();
>>>
>>>        if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
>>> -        uint16_t avail_event = *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]);
>>> +        uint16_t avail_event = le16_to_cpu(
>>> +                *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]));
>>
>> Nitpicking, sometimes using the ld/st API is cleaner (here lduw_le_p).
>>
> 
> I'm not sure if it is right in SVQ, as it is not accessing guest
> memory but QEMU memory that has been mapped to a device. But if you
> think it is still a valid use case for the ld*/st* family, I'd be
> totally OK with that usage.

No need to change; better to use a consistent API across the file.

Regards,

Phil.
Lei Yang Feb. 14, 2025, 2:24 a.m. UTC | #5
I tested this patch with vdpa's regression tests, everything works fine.

Tested-by: Lei Yang <leiyang@redhat.com>

On Thu, Feb 13, 2025 at 2:51 PM Eugenio Perez Martin
<eperezma@redhat.com> wrote:
> [...]
Konstantin Shkolnyy Feb. 14, 2025, 1:02 p.m. UTC | #7
On 2/13/2025 20:24, Lei Yang wrote:
> I tested this patch with vdpa's regression tests, everything works fine.
> 
> Tested-by: Lei Yang <leiyang@redhat.com>

Could you point me to those tests?
Lei Yang Feb. 15, 2025, 8:21 a.m. UTC | #8
On Fri, Feb 14, 2025 at 9:02 PM Konstantin Shkolnyy <kshk@linux.ibm.com> wrote:
>
> On 2/13/2025 20:24, Lei Yang wrote:
> > I tested this patch with vdpa's regression tests, everything works fine.
> >
> > Tested-by: Lei Yang <leiyang@redhat.com>
>
> Could you point me to those tests?

Sure, the test scenarios include ping, mq tests under netperf stress,
hotplug/unplug, reboot, shutdown, pxe_boot, etc.

Patch

diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 37aca8b431..4af0d7c669 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -165,10 +165,10 @@  static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
         descs[i].len = cpu_to_le32(iovec[n].iov_len);
 
         last = i;
-        i = cpu_to_le16(svq->desc_next[i]);
+        i = svq->desc_next[i];
     }
 
-    svq->free_head = le16_to_cpu(svq->desc_next[last]);
+    svq->free_head = svq->desc_next[last];
     return true;
 }
 
@@ -228,10 +228,12 @@  static void vhost_svq_kick(VhostShadowVirtqueue *svq)
     smp_mb();
 
     if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
-        uint16_t avail_event = *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]);
+        uint16_t avail_event = le16_to_cpu(
+                *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]));
         needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx, svq->shadow_avail_idx - 1);
     } else {
-        needs_kick = !(svq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
+        needs_kick =
+                !(svq->vring.used->flags & cpu_to_le16(VRING_USED_F_NO_NOTIFY));
     }
 
     if (!needs_kick) {
@@ -365,7 +367,7 @@  static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
         return true;
     }
 
-    svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx);
+    svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx);
 
     return svq->last_used_idx != svq->shadow_used_idx;
 }
@@ -383,7 +385,7 @@  static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
 {
     if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         uint16_t *used_event = (uint16_t *)&svq->vring.avail->ring[svq->vring.num];
-        *used_event = svq->shadow_used_idx;
+        *used_event = cpu_to_le16(svq->shadow_used_idx);
     } else {
         svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
     }
@@ -408,7 +410,7 @@  static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
                                              uint16_t num, uint16_t i)
 {
     for (uint16_t j = 0; j < (num - 1); ++j) {
-        i = le16_to_cpu(svq->desc_next[i]);
+        i = svq->desc_next[i];
     }
 
     return i;
@@ -683,7 +685,7 @@  void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     svq->desc_state = g_new0(SVQDescState, svq->vring.num);
     svq->desc_next = g_new0(uint16_t, svq->vring.num);
     for (unsigned i = 0; i < svq->vring.num - 1; i++) {
-        svq->desc_next[i] = cpu_to_le16(i + 1);
+        svq->desc_next[i] = i + 1;
     }
 }
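
The v2 note ("Change desc_next[] from LE format to 'CPU'") follows from the
ownership split that the rest of the diff also respects; roughly:

    /*
     * Rule of thumb behind this fix (a sketch, not authoritative):
     *
     *  - svq->vring.desc / .avail / .used are shared with the device,
     *    so every multi-byte field is stored via cpu_to_le*() and read
     *    back via le*_to_cpu().
     *
     *  - svq->desc_next[] is a QEMU-private copy of the descriptor
     *    free-list links; the device never reads it, so it stays in
     *    native CPU byte order with no conversions at all.
     */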