@@ -11,7 +11,7 @@ typedef struct V9fsVirtioState
VirtQueue *vq;
size_t config_size;
V9fsPDU pdus[MAX_REQ];
- VirtQueueElement elems[MAX_REQ];
+ VirtQueueElement *elems[MAX_REQ];
V9fsState state;
} V9fsVirtioState;
@@ -44,7 +44,7 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n);
void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
-int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem);
+void *vring_pop(VirtIODevice *vdev, Vring *vring, size_t sz);
void vring_push(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
int len);
@@ -37,7 +37,7 @@ typedef struct VirtIOBalloon {
uint32_t num_pages;
uint32_t actual;
uint64_t stats[VIRTIO_BALLOON_S_NR];
- VirtQueueElement stats_vq_elem;
+ VirtQueueElement *stats_vq_elem;
size_t stats_vq_offset;
QEMUTimer *stats_timer;
int64_t stats_last_update;
@@ -80,8 +80,7 @@ typedef struct MultiReqBuffer {
bool is_write;
} MultiReqBuffer;
-VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s);
-
+void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req);
void virtio_blk_free_request(VirtIOBlockReq *req);
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb);
@@ -47,7 +47,7 @@ typedef struct VirtIONetQueue {
QEMUBH *tx_bh;
int tx_waiting;
struct {
- VirtQueueElement elem;
+ VirtQueueElement *elem;
} async_tx;
struct VirtIONet *n;
} VirtIONetQueue;
@@ -160,7 +160,7 @@ void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp);
void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req);
bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req);
void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req);
-VirtIOSCSIReq *virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq);
+void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req);
void virtio_scsi_free_req(VirtIOSCSIReq *req);
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
uint32_t event, uint32_t reason);
@@ -122,7 +122,7 @@ struct VirtIOSerialPort {
* element popped and continue consuming it once the backend
* becomes writable again.
*/
- VirtQueueElement elem;
+ VirtQueueElement *elem;
/*
* The index and the offset into the iov buffer that was popped in
@@ -152,7 +152,7 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx);
void virtqueue_map(VirtQueueElement *elem);
-int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
+void *virtqueue_pop(VirtQueue *vq, size_t sz);
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
unsigned int out_bytes);
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
@@ -1587,7 +1587,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
int read_count;
int64_t xattr_len;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
- VirtQueueElement *elem = &v->elems[pdu->idx];
+ VirtQueueElement *elem = v->elems[pdu->idx];
xattr_len = fidp->fs.xattr.len;
read_count = xattr_len - off;
@@ -26,10 +26,12 @@ void virtio_9p_push_and_notify(V9fsPDU *pdu)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
- VirtQueueElement *elem = &v->elems[pdu->idx];
+ VirtQueueElement *elem = v->elems[pdu->idx];
/* push onto queue and notify */
virtqueue_push(v->vq, elem, pdu->size);
+ g_free(elem);
+ v->elems[pdu->idx] = NULL;
/* FIXME: we should batch these completions */
virtio_notify(VIRTIO_DEVICE(v), v->vq);
@@ -48,10 +50,10 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
uint8_t id;
uint16_t tag_le;
} QEMU_PACKED out;
- VirtQueueElement *elem = &v->elems[pdu->idx];
+ VirtQueueElement *elem;
- len = virtqueue_pop(vq, elem);
- if (!len) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
pdu_free(pdu);
break;
}
@@ -59,6 +61,7 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
BUG_ON(elem->out_num == 0 || elem->in_num == 0);
QEMU_BUILD_BUG_ON(sizeof out != 7);
+ v->elems[pdu->idx] = elem;
len = iov_to_buf(elem->out_sg, elem->out_num, 0,
&out, sizeof out);
BUG_ON(len != sizeof out);
@@ -141,7 +144,7 @@ ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
- VirtQueueElement *elem = &v->elems[pdu->idx];
+ VirtQueueElement *elem = v->elems[pdu->idx];
return v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap);
}
@@ -151,7 +154,7 @@ ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
- VirtQueueElement *elem = &v->elems[pdu->idx];
+ VirtQueueElement *elem = v->elems[pdu->idx];
return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
}
@@ -161,7 +164,7 @@ void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
- VirtQueueElement *elem = &v->elems[pdu->idx];
+ VirtQueueElement *elem = v->elems[pdu->idx];
if (is_write) {
*piov = elem->out_sg;
@@ -100,20 +100,19 @@ static void handle_notify(EventNotifier *e)
blk_io_plug(s->conf->conf.blk);
for (;;) {
MultiReqBuffer mrb = {};
- int ret;
/* Disable guest->host notifies to avoid unnecessary vmexits */
vring_disable_notification(s->vdev, &s->vring);
for (;;) {
- VirtIOBlockReq *req = virtio_blk_alloc_request(vblk);
+ VirtIOBlockReq *req = vring_pop(s->vdev, &s->vring,
+ sizeof(VirtIOBlockReq));
- ret = vring_pop(s->vdev, &s->vring, &req->elem);
- if (ret < 0) {
- virtio_blk_free_request(req);
+ if (req == NULL) {
break; /* no more requests */
}
+ virtio_blk_init_request(vblk, req);
trace_virtio_blk_data_plane_process_request(s, req->elem.out_num,
req->elem.in_num,
req->elem.index);
@@ -125,7 +124,7 @@ static void handle_notify(EventNotifier *e)
virtio_blk_submit_multireq(s->conf->conf.blk, &mrb);
}
- if (likely(ret == -EAGAIN)) { /* vring emptied */
+ if (likely(!vring_more_avail(s->vdev, &s->vring))) { /* vring emptied */
/* Re-enable guest->host notifies and stop processing the vring.
* But if the guest has snuck in more descriptors, keep processing.
*/
@@ -29,15 +29,13 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
-VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
+void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req)
{
- VirtIOBlockReq *req = g_new(VirtIOBlockReq, 1);
req->dev = s;
req->qiov.size = 0;
req->in_len = 0;
req->next = NULL;
req->mr_next = NULL;
- return req;
}
void virtio_blk_free_request(VirtIOBlockReq *req)
@@ -193,13 +191,11 @@ out:
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
- VirtIOBlockReq *req = virtio_blk_alloc_request(s);
+ VirtIOBlockReq *req = virtqueue_pop(s->vq, sizeof(VirtIOBlockReq));
- if (!virtqueue_pop(s->vq, &req->elem)) {
- virtio_blk_free_request(req);
- return NULL;
+ if (req) {
+ virtio_blk_init_request(s, req);
}
-
return req;
}
@@ -836,7 +832,8 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
VirtIOBlock *s = VIRTIO_BLK(vdev);
while (qemu_get_sbyte(f)) {
- VirtIOBlockReq *req = virtio_blk_alloc_request(s);
+ VirtIOBlockReq *req = g_new(VirtIOBlockReq, 1);
+ virtio_blk_init_request(s, req);
qemu_get_buffer(f, (unsigned char *)&req->elem,
sizeof(VirtQueueElement));
req->next = s->rq;
@@ -83,7 +83,7 @@ static bool use_multiport(VirtIOSerial *vser)
static size_t write_to_port(VirtIOSerialPort *port,
const uint8_t *buf, size_t size)
{
- VirtQueueElement elem;
+ VirtQueueElement *elem;
VirtQueue *vq;
size_t offset;
@@ -96,15 +96,17 @@ static size_t write_to_port(VirtIOSerialPort *port,
while (offset < size) {
size_t len;
- if (!virtqueue_pop(vq, &elem)) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
break;
}
- len = iov_from_buf(elem.in_sg, elem.in_num, 0,
+ len = iov_from_buf(elem->in_sg, elem->in_num, 0,
buf + offset, size - offset);
offset += len;
- virtqueue_push(vq, &elem, len);
+ virtqueue_push(vq, elem, len);
+ g_free(elem);
}
virtio_notify(VIRTIO_DEVICE(port->vser), vq);
@@ -113,13 +115,18 @@ static size_t write_to_port(VirtIOSerialPort *port,
static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev)
{
- VirtQueueElement elem;
+ VirtQueueElement *elem;
if (!virtio_queue_ready(vq)) {
return;
}
- while (virtqueue_pop(vq, &elem)) {
- virtqueue_push(vq, &elem, 0);
+ for (;;) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+ virtqueue_push(vq, elem, 0);
+ g_free(elem);
}
virtio_notify(vdev, vq);
}
@@ -138,21 +145,22 @@ static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq,
unsigned int i;
/* Pop an elem only if we haven't left off a previous one mid-way */
- if (!port->elem.out_num) {
- if (!virtqueue_pop(vq, &port->elem)) {
+ if (!port->elem) {
+ port->elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!port->elem) {
break;
}
port->iov_idx = 0;
port->iov_offset = 0;
}
- for (i = port->iov_idx; i < port->elem.out_num; i++) {
+ for (i = port->iov_idx; i < port->elem->out_num; i++) {
size_t buf_size;
ssize_t ret;
- buf_size = port->elem.out_sg[i].iov_len - port->iov_offset;
+ buf_size = port->elem->out_sg[i].iov_len - port->iov_offset;
ret = vsc->have_data(port,
- port->elem.out_sg[i].iov_base
+ port->elem->out_sg[i].iov_base
+ port->iov_offset,
buf_size);
if (port->throttled) {
@@ -167,8 +175,9 @@ static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq,
if (port->throttled) {
break;
}
- virtqueue_push(vq, &port->elem, 0);
- port->elem.out_num = 0;
+ virtqueue_push(vq, port->elem, 0);
+ g_free(port->elem);
+ port->elem = NULL;
}
virtio_notify(vdev, vq);
}
@@ -185,22 +194,26 @@ static void flush_queued_data(VirtIOSerialPort *port)
static size_t send_control_msg(VirtIOSerial *vser, void *buf, size_t len)
{
- VirtQueueElement elem;
+ VirtQueueElement *elem;
VirtQueue *vq;
vq = vser->c_ivq;
if (!virtio_queue_ready(vq)) {
return 0;
}
- if (!virtqueue_pop(vq, &elem)) {
+
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
return 0;
}
/* TODO: detect a buffer that's too short, set NEEDS_RESET */
- iov_from_buf(elem.in_sg, elem.in_num, 0, buf, len);
+ iov_from_buf(elem->in_sg, elem->in_num, 0, buf, len);
- virtqueue_push(vq, &elem, len);
+ virtqueue_push(vq, elem, len);
virtio_notify(VIRTIO_DEVICE(vser), vq);
+ g_free(elem);
+
return len;
}
@@ -414,7 +427,7 @@ static void control_in(VirtIODevice *vdev, VirtQueue *vq)
static void control_out(VirtIODevice *vdev, VirtQueue *vq)
{
- VirtQueueElement elem;
+ VirtQueueElement *elem;
VirtIOSerial *vser;
uint8_t *buf;
size_t len;
@@ -423,10 +436,15 @@ static void control_out(VirtIODevice *vdev, VirtQueue *vq)
len = 0;
buf = NULL;
- while (virtqueue_pop(vq, &elem)) {
+ for (;;) {
size_t cur_len;
- cur_len = iov_size(elem.out_sg, elem.out_num);
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+
+ cur_len = iov_size(elem->out_sg, elem->out_num);
/*
* Allocate a new buf only if we didn't have one previously or
* if the size of the buf differs
@@ -437,10 +455,11 @@ static void control_out(VirtIODevice *vdev, VirtQueue *vq)
buf = g_malloc(cur_len);
len = cur_len;
}
- iov_to_buf(elem.out_sg, elem.out_num, 0, buf, cur_len);
+ iov_to_buf(elem->out_sg, elem->out_num, 0, buf, cur_len);
handle_control_message(vser, buf, cur_len);
- virtqueue_push(vq, &elem, 0);
+ virtqueue_push(vq, elem, 0);
+ g_free(elem);
}
g_free(buf);
virtio_notify(vdev, vq);
@@ -620,7 +639,7 @@ static void virtio_serial_save_device(VirtIODevice *vdev, QEMUFile *f)
qemu_put_byte(f, port->host_connected);
elem_popped = 0;
- if (port->elem.out_num) {
+ if (port->elem) {
elem_popped = 1;
}
qemu_put_be32s(f, &elem_popped);
@@ -628,8 +647,8 @@ static void virtio_serial_save_device(VirtIODevice *vdev, QEMUFile *f)
qemu_put_be32s(f, &port->iov_idx);
qemu_put_be64s(f, &port->iov_offset);
- qemu_put_buffer(f, (unsigned char *)&port->elem,
- sizeof(port->elem));
+ qemu_put_buffer(f, (unsigned char *)port->elem,
+ sizeof(VirtQueueElement));
}
}
}
@@ -704,9 +723,10 @@ static int fetch_active_ports_list(QEMUFile *f, int version_id,
qemu_get_be32s(f, &port->iov_idx);
qemu_get_be64s(f, &port->iov_offset);
- qemu_get_buffer(f, (unsigned char *)&port->elem,
- sizeof(port->elem));
- virtqueue_map(&port->elem);
+ port->elem = g_new(VirtQueueElement, 1);
+ qemu_get_buffer(f, (unsigned char *)port->elem,
+ sizeof(VirtQueueElement));
+ virtqueue_map(port->elem);
/*
* Port was throttled on source machine. Let's
@@ -928,7 +948,7 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp)
return;
}
- port->elem.out_num = 0;
+ port->elem = NULL;
}
static void virtser_port_device_plug(HotplugHandler *hotplug_dev,
@@ -804,16 +804,15 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
#endif
- cmd = g_new(struct virtio_gpu_ctrl_command, 1);
- while (virtqueue_pop(vq, &cmd->elem)) {
+ cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
+ while (cmd) {
cmd->vq = vq;
cmd->error = 0;
cmd->finished = false;
cmd->waiting = false;
QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
- cmd = g_new(struct virtio_gpu_ctrl_command, 1);
+ cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
}
- g_free(cmd);
virtio_gpu_process_cmdq(g);
@@ -833,15 +832,20 @@ static void virtio_gpu_ctrl_bh(void *opaque)
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOGPU *g = VIRTIO_GPU(vdev);
- VirtQueueElement elem;
+ VirtQueueElement *elem;
size_t s;
struct virtio_gpu_update_cursor cursor_info;
if (!virtio_queue_ready(vq)) {
return;
}
- while (virtqueue_pop(vq, &elem)) {
- s = iov_to_buf(elem.out_sg, elem.out_num, 0,
+ for (;;) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+
+ s = iov_to_buf(elem->out_sg, elem->out_num, 0,
&cursor_info, sizeof(cursor_info));
if (s != sizeof(cursor_info)) {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -850,8 +854,9 @@ static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
} else {
update_cursor(g, &cursor_info);
}
- virtqueue_push(vq, &elem, 0);
+ virtqueue_push(vq, elem, 0);
virtio_notify(vdev, vq);
+ g_free(elem);
}
}
@@ -17,7 +17,7 @@
void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event)
{
- VirtQueueElement elem;
+ VirtQueueElement *elem;
unsigned have, need;
int i, len;
@@ -50,14 +50,16 @@ void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event)
/* ... and finally pass them to the guest */
for (i = 0; i < vinput->qindex; i++) {
- if (!virtqueue_pop(vinput->evt, &elem)) {
+ elem = virtqueue_pop(vinput->evt, sizeof(VirtQueueElement));
+ if (!elem) {
/* should not happen, we've checked for space beforehand */
fprintf(stderr, "%s: Huh? No vq elem available ...\n", __func__);
return;
}
- len = iov_from_buf(elem.in_sg, elem.in_num,
+ len = iov_from_buf(elem->in_sg, elem->in_num,
0, vinput->queue+i, sizeof(virtio_input_event));
- virtqueue_push(vinput->evt, &elem, len);
+ virtqueue_push(vinput->evt, elem, len);
+ g_free(elem);
}
virtio_notify(VIRTIO_DEVICE(vinput), vinput->evt);
vinput->qindex = 0;
@@ -73,17 +75,23 @@ static void virtio_input_handle_sts(VirtIODevice *vdev, VirtQueue *vq)
VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vdev);
VirtIOInput *vinput = VIRTIO_INPUT(vdev);
virtio_input_event event;
- VirtQueueElement elem;
+ VirtQueueElement *elem;
int len;
- while (virtqueue_pop(vinput->sts, &elem)) {
+ for (;;) {
+ elem = virtqueue_pop(vinput->sts, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+
memset(&event, 0, sizeof(event));
- len = iov_to_buf(elem.out_sg, elem.out_num,
+ len = iov_to_buf(elem->out_sg, elem->out_num,
0, &event, sizeof(event));
if (vic->handle_status) {
vic->handle_status(vinput, &event);
}
- virtqueue_push(vinput->sts, &elem, len);
+ virtqueue_push(vinput->sts, elem, len);
+ g_free(elem);
}
virtio_notify(vdev, vinput->sts);
}
@@ -819,20 +819,24 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
VirtIONet *n = VIRTIO_NET(vdev);
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
- VirtQueueElement elem;
+ VirtQueueElement *elem;
size_t s;
struct iovec *iov, *iov2;
unsigned int iov_cnt;
- while (virtqueue_pop(vq, &elem)) {
- if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
- iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
+ for (;;) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+ if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
+ iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
error_report("virtio-net ctrl missing headers");
exit(1);
}
- iov_cnt = elem.out_num;
- iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
+ iov_cnt = elem->out_num;
+ iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
@@ -851,12 +855,13 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
}
- s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
+ s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
assert(s == sizeof(status));
- virtqueue_push(vq, &elem, sizeof(status));
+ virtqueue_push(vq, elem, sizeof(status));
virtio_notify(vdev, vq);
g_free(iov2);
+ g_free(elem);
}
}
@@ -1045,13 +1050,14 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
offset = i = 0;
while (offset < size) {
- VirtQueueElement elem;
+ VirtQueueElement *elem;
int len, total;
- const struct iovec *sg = elem.in_sg;
+ const struct iovec *sg;
total = 0;
- if (virtqueue_pop(q->rx_vq, &elem) == 0) {
+ elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
+ if (!elem) {
if (i == 0)
return -1;
error_report("virtio-net unexpected empty queue: "
@@ -1064,21 +1070,22 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
exit(1);
}
- if (elem.in_num < 1) {
+ if (elem->in_num < 1) {
error_report("virtio-net receive queue contains no in buffers");
exit(1);
}
+ sg = elem->in_sg;
if (i == 0) {
assert(offset == 0);
if (n->mergeable_rx_bufs) {
mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
- sg, elem.in_num,
+ sg, elem->in_num,
offsetof(typeof(mhdr), num_buffers),
sizeof(mhdr.num_buffers));
}
- receive_header(n, sg, elem.in_num, buf, size);
+ receive_header(n, sg, elem->in_num, buf, size);
offset = n->host_hdr_len;
total += n->guest_hdr_len;
guest_offset = n->guest_hdr_len;
@@ -1087,7 +1094,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
}
/* copy in packet. ugh */
- len = iov_from_buf(sg, elem.in_num, guest_offset,
+ len = iov_from_buf(sg, elem->in_num, guest_offset,
buf + offset, size - offset);
total += len;
offset += len;
@@ -1095,12 +1102,14 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
* must have consumed the complete packet.
* Otherwise, drop it. */
if (!n->mergeable_rx_bufs && offset < size) {
- virtqueue_discard(q->rx_vq, &elem, total);
+ virtqueue_discard(q->rx_vq, elem, total);
+ g_free(elem);
return size;
}
/* signal other side */
- virtqueue_fill(q->rx_vq, &elem, total, i++);
+ virtqueue_fill(q->rx_vq, elem, total, i++);
+ g_free(elem);
}
if (mhdr_cnt) {
@@ -1124,10 +1133,11 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
+ virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
virtio_notify(vdev, q->tx_vq);
- q->async_tx.elem.out_num = 0;
+ g_free(q->async_tx.elem);
+ q->async_tx.elem = NULL;
virtio_queue_set_notification(q->tx_vq, 1);
virtio_net_flush_tx(q);
@@ -1138,25 +1148,31 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- VirtQueueElement elem;
+ VirtQueueElement *elem;
int32_t num_packets = 0;
int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return num_packets;
}
- if (q->async_tx.elem.out_num) {
+ if (q->async_tx.elem) {
virtio_queue_set_notification(q->tx_vq, 0);
return num_packets;
}
- while (virtqueue_pop(q->tx_vq, &elem)) {
+ for (;;) {
ssize_t ret;
- unsigned int out_num = elem.out_num;
- struct iovec *out_sg = &elem.out_sg[0];
- struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
+ unsigned int out_num;
+ struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
struct virtio_net_hdr_mrg_rxbuf mhdr;
+ elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+
+ out_num = elem->out_num;
+ out_sg = elem->out_sg;
if (out_num < 1) {
error_report("virtio-net header not in first element");
exit(1);
@@ -1208,8 +1224,9 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
}
drop:
- virtqueue_push(q->tx_vq, &elem, 0);
+ virtqueue_push(q->tx_vq, elem, 0);
virtio_notify(vdev, q->tx_vq);
+ g_free(elem);
if (++num_packets >= n->tx_burst) {
break;
@@ -81,15 +81,16 @@ fail_vring:
VirtIOSCSIReq *virtio_scsi_pop_req_vring(VirtIOSCSI *s,
VirtIOSCSIVring *vring)
{
- VirtIOSCSIReq *req = virtio_scsi_init_req(s, NULL);
- int r;
+ VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
+ VirtIOSCSIReq *req;
- req->vring = vring;
- r = vring_pop((VirtIODevice *)s, &vring->vring, &req->elem);
- if (r < 0) {
- virtio_scsi_free_req(req);
- req = NULL;
+ req = vring_pop((VirtIODevice *)s, &vring->vring,
+ sizeof(VirtIOSCSIReq) + vs->cdb_size);
+ if (!req) {
+ return NULL;
}
+ virtio_scsi_init_req(s, NULL, req);
+ req->vring = vring;
return req;
}
@@ -41,19 +41,15 @@ static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun)
return scsi_device_find(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}
-VirtIOSCSIReq *virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq)
+void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
- VirtIOSCSIReq *req;
- VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
const size_t zero_skip = offsetof(VirtIOSCSIReq, vring);
- req = g_malloc(sizeof(*req) + vs->cdb_size);
req->vq = vq;
req->dev = s;
qemu_sglist_init(&req->qsgl, DEVICE(s), 8, &address_space_memory);
qemu_iovec_init(&req->resp_iov, 1);
memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
- return req;
}
void virtio_scsi_free_req(VirtIOSCSIReq *req)
@@ -174,11 +170,14 @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
- VirtIOSCSIReq *req = virtio_scsi_init_req(s, vq);
- if (!virtqueue_pop(vq, &req->elem)) {
- virtio_scsi_free_req(req);
+ VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
+ VirtIOSCSIReq *req;
+
+ req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
+ if (!req) {
return NULL;
}
+ virtio_scsi_init_req(s, vq, req);
return req;
}
@@ -203,8 +202,9 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
qemu_get_be32s(f, &n);
assert(n < vs->conf.num_queues);
- req = virtio_scsi_init_req(s, vs->cmd_vqs[n]);
+ req = g_malloc(sizeof(VirtIOSCSIReq) + vs->cdb_size);
qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
+ virtio_scsi_init_req(s, vs->cmd_vqs[n], req);
virtqueue_map(&req->elem);
@@ -389,23 +389,26 @@ static void vring_unmap_element(VirtQueueElement *elem)
*
* Stolen from linux/drivers/vhost/vhost.c.
*/
-int vring_pop(VirtIODevice *vdev, Vring *vring,
- VirtQueueElement *elem)
+void *vring_pop(VirtIODevice *vdev, Vring *vring, size_t sz)
{
struct vring_desc desc;
unsigned int i, head, found = 0, num = vring->vr.num;
uint16_t avail_idx, last_avail_idx;
+ VirtQueueElement *elem = NULL;
int ret;
- /* Initialize elem so it can be safely unmapped */
- elem->in_num = elem->out_num = 0;
-
/* If there was a fatal error then refuse operation */
if (vring->broken) {
ret = -EFAULT;
goto out;
}
+ assert(sz >= sizeof(VirtQueueElement));
+ elem = g_malloc(sz);
+
+ /* Initialize elem so it can be safely unmapped */
+ elem->in_num = elem->out_num = 0;
+
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vring->last_avail_idx;
avail_idx = vring_get_avail_idx(vdev, vring);
@@ -481,7 +484,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
virtio_tswap16(vdev, vring->last_avail_idx);
}
- return head;
+ return elem;
out:
assert(ret < 0);
@@ -489,7 +492,8 @@ out:
vring->broken = true;
}
vring_unmap_element(elem);
- return ret;
+ g_free(elem);
+ return NULL;
}
/* After we've used one of their buffers, we tell them about it.
@@ -107,8 +107,10 @@ static void balloon_stats_poll_cb(void *opaque)
return;
}
- virtqueue_push(s->svq, &s->stats_vq_elem, s->stats_vq_offset);
+ virtqueue_push(s->svq, s->stats_vq_elem, s->stats_vq_offset);
virtio_notify(vdev, s->svq);
+ g_free(s->stats_vq_elem);
+ s->stats_vq_elem = NULL;
}
static void balloon_stats_get_all(Object *obj, struct Visitor *v,
@@ -206,14 +208,18 @@ static void balloon_stats_set_poll_interval(Object *obj, struct Visitor *v,
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
- VirtQueueElement elem;
+ VirtQueueElement *elem;
MemoryRegionSection section;
- while (virtqueue_pop(vq, &elem)) {
+ for (;;) {
size_t offset = 0;
uint32_t pfn;
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ return;
+ }
- while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) {
+ while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) {
ram_addr_t pa;
ram_addr_t addr;
int p = virtio_ldl_p(vdev, &pfn);
@@ -236,20 +242,22 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
memory_region_unref(section.mr);
}
- virtqueue_push(vq, &elem, offset);
+ virtqueue_push(vq, elem, offset);
virtio_notify(vdev, vq);
+ g_free(elem);
}
}
static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
- VirtQueueElement *elem = &s->stats_vq_elem;
+ VirtQueueElement *elem;
VirtIOBalloonStat stat;
size_t offset = 0;
qemu_timeval tv;
- if (!virtqueue_pop(vq, elem)) {
+ s->stats_vq_elem = elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
goto out;
}
@@ -44,7 +44,7 @@ static void chr_read(void *opaque, const void *buf, size_t size)
{
VirtIORNG *vrng = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(vrng);
- VirtQueueElement elem;
+ VirtQueueElement *elem;
size_t len;
int offset;
@@ -56,15 +56,17 @@ static void chr_read(void *opaque, const void *buf, size_t size)
offset = 0;
while (offset < size) {
- if (!virtqueue_pop(vrng->vq, &elem)) {
+ elem = virtqueue_pop(vrng->vq, sizeof(VirtQueueElement));
+ if (!elem) {
break;
}
- len = iov_from_buf(elem.in_sg, elem.in_num,
+ len = iov_from_buf(elem->in_sg, elem->in_num,
0, buf + offset, size - offset);
offset += len;
- virtqueue_push(vrng->vq, &elem, len);
+ virtqueue_push(vrng->vq, elem, len);
trace_virtio_rng_pushed(vrng, len);
+ g_free(elem);
}
virtio_notify(vdev, vrng->vq);
}
@@ -501,16 +501,20 @@ void virtqueue_map(VirtQueueElement *elem)
0);
}
-int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
+void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
unsigned int i, head, max;
hwaddr desc_pa = vq->vring.desc;
VirtIODevice *vdev = vq->vdev;
+ VirtQueueElement *elem;
- if (!virtqueue_num_heads(vq, vq->last_avail_idx))
- return 0;
+ if (!virtqueue_num_heads(vq, vq->last_avail_idx)) {
+ return NULL;
+ }
/* When we start there are none of either input nor output. */
+ assert(sz >= sizeof(VirtQueueElement));
+ elem = g_malloc(sz);
elem->out_num = elem->in_num = 0;
max = vq->vring.num;
@@ -569,7 +573,7 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
- return elem->in_num + elem->out_num;
+ return elem;
}
/* virtio device */
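
Note on the resulting calling convention: virtqueue_pop() and vring_pop() now allocate the element they return. The sz argument must be at least sizeof(VirtQueueElement), and may instead be the size of a larger device request struct that embeds the element at its start, as the virtio-blk, virtio-scsi and virtio-gpu hunks do. A NULL return means the ring is empty, and the caller owns the allocation, freeing it with g_free() once the element has been pushed back. A minimal sketch of a caller under that contract follows; handle_foo_output and its body are illustrative placeholders, not code from this series.

    static void handle_foo_output(VirtIODevice *vdev, VirtQueue *vq)
    {
        VirtQueueElement *elem;

        for (;;) {
            /* pop now allocates the element; NULL means no more buffers */
            elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
            if (!elem) {
                break;
            }

            /* ... consume elem->out_sg / fill elem->in_sg here ... */

            virtqueue_push(vq, elem, 0);
            g_free(elem);    /* the popped element is owned by the caller */
        }
        virtio_notify(vdev, vq);
    }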