@@ -1024,6 +1024,71 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
     }
 }
 
+static void virtqueue_ordered_flush(VirtQueue *vq)
+{
+    unsigned int i = vq->used_idx % vq->vring.num;
+    unsigned int ndescs = 0;
+    uint16_t old = vq->used_idx;
+    uint16_t new;
+    bool packed;
+    VRingUsedElem uelem;
+
+    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
+
+    if (packed) {
+        if (unlikely(!vq->vring.desc)) {
+            return;
+        }
+    } else if (unlikely(!vq->vring.used)) {
+        return;
+    }
+
+    /* First expected in-order element isn't ready, nothing to do */
+    if (!vq->used_elems[i].in_order_filled) {
+        return;
+    }
+
+    /* Search for filled elements in-order */
+    while (vq->used_elems[i].in_order_filled) {
+        /*
+         * First entry for packed VQs is written last so the guest
+         * doesn't see invalid descriptors.
+         */
+        if (packed && i != vq->used_idx) {
+            virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
+        } else if (!packed) {
+            uelem.id = vq->used_elems[i].index;
+            uelem.len = vq->used_elems[i].len;
+            vring_used_write(vq, &uelem, i);
+        }
+
+        vq->used_elems[i].in_order_filled = false;
+        ndescs += vq->used_elems[i].ndescs;
+        i += vq->used_elems[i].ndescs;
+        if (i >= vq->vring.num) {
+            i -= vq->vring.num;
+        }
+    }
+
+    if (packed) {
+        virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true);
+        vq->used_idx += ndescs;
+        if (vq->used_idx >= vq->vring.num) {
+            vq->used_idx -= vq->vring.num;
+            vq->used_wrap_counter ^= 1;
+            vq->signalled_used_valid = false;
+        }
+    } else {
+        new = old + ndescs;
+        vring_used_idx_set(vq, new);
+        if (unlikely((int16_t)(new - vq->signalled_used) <
+                     (uint16_t)(new - old))) {
+            vq->signalled_used_valid = false;
+        }
+    }
+    vq->inuse -= ndescs;
+}
+
 void virtqueue_flush(VirtQueue *vq, unsigned int count)
 {
     if (virtio_device_disabled(vq->vdev)) {
@@ -1031,7 +1096,9 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
         return;
     }
 
-    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
+        virtqueue_ordered_flush(vq);
+    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
         virtqueue_packed_flush(vq, count);
     } else {
         virtqueue_split_flush(vq, count);
Add VIRTIO_F_IN_ORDER feature support for the virtqueue_flush operation.

The goal of the virtqueue_ordered_flush operation when the
VIRTIO_F_IN_ORDER feature has been negotiated is to write elements to
the used/descriptor ring in-order and then update used_idx.

The function iterates through the VirtQueueElement used_elems array
in-order starting at vq->used_idx. If the element is valid (filled),
the element is written to the used/descriptor ring. This process
continues until an invalid (not filled) element is found.

For packed VQs, the first entry (at vq->used_idx) is written to the
descriptor ring last so the guest doesn't see any invalid descriptors.

If any elements were written, used_idx is updated.

Signed-off-by: Jonah Palmer <jonah.palmer@oracle.com>
---
 hw/virtio/virtio.c | 69 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 68 insertions(+), 1 deletion(-)
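For anyone who wants to poke at the in-order scan in isolation, here is a
minimal, self-contained C sketch of the same idea. It is not QEMU code:
ToyElem, ToyQueue, toy_ordered_flush and RING_NUM are made-up names, the
used/descriptor ring write is replaced by a printf, and every element is
assumed to consume at least one descriptor. Only the shape of the loop
mirrors virtqueue_ordered_flush: start at used_idx, consume contiguous
filled entries, advance by each element's ndescs, then publish the total
once at the end.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_NUM 8                  /* toy ring size */

/* Stand-in for the per-element bookkeeping kept in used_elems[]. */
typedef struct {
    unsigned int ndescs;            /* descriptors consumed by this element */
    unsigned int len;               /* bytes written, reported on flush */
    bool in_order_filled;           /* completed, waiting to be flushed */
} ToyElem;

typedef struct {
    ToyElem elems[RING_NUM];
    uint16_t used_idx;              /* free-running, like the split ring */
    unsigned int inuse;             /* descriptors currently outstanding */
} ToyQueue;

/* Flush every completed element that is contiguous from used_idx. */
static void toy_ordered_flush(ToyQueue *vq)
{
    unsigned int i = vq->used_idx % RING_NUM;
    unsigned int ndescs = 0;

    while (vq->elems[i].in_order_filled) {
        printf("flush slot %u (len %u)\n", i, vq->elems[i].len);
        vq->elems[i].in_order_filled = false;
        ndescs += vq->elems[i].ndescs;
        i = (i + vq->elems[i].ndescs) % RING_NUM;
    }

    vq->used_idx += ndescs;         /* publish everything flushed at once */
    vq->inuse -= ndescs;
}

int main(void)
{
    ToyQueue vq = { .used_idx = 0, .inuse = 3 };

    /* Slot 2 completes out of order; slot 0 is still pending. */
    vq.elems[2] = (ToyElem){ .ndescs = 1, .len = 64, .in_order_filled = true };
    toy_ordered_flush(&vq);

    /* Once slot 0 (2 descriptors) completes, both become visible. */
    vq.elems[0] = (ToyElem){ .ndescs = 2, .len = 128, .in_order_filled = true };
    toy_ordered_flush(&vq);

    printf("used_idx=%u inuse=%u\n", (unsigned)vq.used_idx, vq.inuse);
    return 0;
}

With the calls in main(), the first flush prints nothing because slot 0 is
still pending; the second prints slots 0 and 2 and leaves used_idx=3 and
inuse=0.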