Message ID | CALwKnOiWKiXMVwvvZj9pFdJnJ6Q5Z_Ar=Q7rEEp6AC6_vKs3Ww@mail.gmail.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Siddharth Gupta <siddharthgupta04@gmail.com> writes: > From 032be62f56a207833ae12cc9474e3e8be5ed8eb4 Mon Sep 17 00:00:00 2001 > From: Siddharth Gupta <siddharthgupta04@gmail.com> > Date: Fri, 11 Mar 2016 20:10:41 +0530 > Subject: [PATCH] > bitesizedtasks-large_frames-hw_dma_xilinx-hw_net_virtio I think the subject should be what was done rather than where the work came from. Paolo has already mentioned that this may not be a good idea, especially for virtio, so the following comments are general QEMU style comments for future reference. > > --- > hw/dma/xilinx_axidma.c | 5 ++++- > hw/net/virtio-net.c | 11 ++++++++++- > 2 files changed, 14 insertions(+), 2 deletions(-) > > diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c > index ce5c1e6..9c6bda2 100644 > --- a/hw/dma/xilinx_axidma.c > +++ b/hw/dma/xilinx_axidma.c > @@ -255,13 +255,15 @@ static void stream_process_mem2s(struct Stream *s, > StreamSlave *tx_data_dev, > StreamSlave *tx_control_dev) > { > uint32_t prev_d; > - unsigned char txbuf[16 * 1024]; > + unsigned char *txbuf; > unsigned int txlen; > > if (!stream_running(s) || stream_idle(s)) { > return; > } > > + txbuf = (unsigned char *) malloc(16 * 1024 * sizeof(unsigned > char)); QEMU uses g_malloc/g_free wrappers for memory allocation. For one thing g_malloc can never fail, whereas here you may end up with a NULL ptr. 
> + > while (1) { > stream_desc_load(s, s->regs[R_CURDESC]); > > @@ -303,6 +305,7 @@ static void stream_process_mem2s(struct Stream *s, > StreamSlave *tx_data_dev, > break; > } > } > + free(txbuf); > } > > static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf, > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c > index 5798f87..ba6ebac 100644 > --- a/hw/net/virtio-net.c > +++ b/hw/net/virtio-net.c > @@ -1094,7 +1094,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, > const uint8_t *buf, size_t > VirtIONet *n = qemu_get_nic_opaque(nc); > VirtIONetQueue *q = virtio_net_get_subqueue(nc); > VirtIODevice *vdev = VIRTIO_DEVICE(n); > - struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; > + struct iovec *mhdr_sg; > struct virtio_net_hdr_mrg_rxbuf mhdr; > unsigned mhdr_cnt = 0; > size_t offset, i, guest_offset; > @@ -1113,6 +1113,8 @@ static ssize_t virtio_net_receive(NetClientState *nc, > const uint8_t *buf, size_t > > offset = i = 0; > > + mhdr_sg = (struct iovec *) malloc(VIRTQUEUE_MAX_SIZE * sizeof(struct > iovec)); > + > while (offset < size) { > VirtQueueElement *elem; > int len, total; > @@ -1122,6 +1124,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, > const uint8_t *buf, size_t > > elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); > if (!elem) { > + free(mhdr_sg); > if (i == 0) > return -1; > error_report("virtio-net unexpected empty queue: " > @@ -1136,11 +1139,15 @@ static ssize_t virtio_net_receive(NetClientState > *nc, const uint8_t *buf, size_t > > if (elem->in_num < 1) { > error_report("virtio-net receive queue contains no in > buffers"); > + free(mhdr_sg); > exit(1); > } > > sg = elem->in_sg; > if (i == 0) { > + if (offset != 0) { > + free(mhdr_sg); > + } > assert(offset == 0); > if (n->mergeable_rx_bufs) { > mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), > @@ -1168,6 +1175,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, > const uint8_t *buf, size_t > if (!n->mergeable_rx_bufs && offset < size) { > 
virtqueue_discard(q->rx_vq, elem, total); > g_free(elem); > + free(mhdr_sg); > return size; > } > > @@ -1186,6 +1194,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, > const uint8_t *buf, size_t > virtqueue_flush(q->rx_vq, i); > virtio_notify(vdev, q->rx_vq); > > + free(mhdr_sg); > return size; > } -- Alex Bennée
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c index ce5c1e6..9c6bda2 100644 --- a/hw/dma/xilinx_axidma.c +++ b/hw/dma/xilinx_axidma.c @@ -255,13 +255,15 @@ static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev, StreamSlave *tx_control_dev) { uint32_t prev_d; - unsigned char txbuf[16 * 1024]; + unsigned char *txbuf; unsigned int txlen; if (!stream_running(s) || stream_idle(s)) { return; } + txbuf = (unsigned char *) malloc(16 * 1024 * sizeof(unsigned char)); + while (1) { stream_desc_load(s, s->regs[R_CURDESC]); @@ -303,6 +305,7 @@ static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev, break; } } + free(txbuf); } static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf, diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 5798f87..ba6ebac 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -1094,7 +1094,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t VirtIONet *n = qemu_get_nic_opaque(nc); VirtIONetQueue *q = virtio_net_get_subqueue(nc); VirtIODevice *vdev = VIRTIO_DEVICE(n); - struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; + struct iovec *mhdr_sg; struct virtio_net_hdr_mrg_rxbuf mhdr; unsigned mhdr_cnt = 0;
From 032be62f56a207833ae12cc9474e3e8be5ed8eb4 Mon Sep 17 00:00:00 2001 From: Siddharth Gupta <siddharthgupta04@gmail.com> Date: Fri, 11 Mar 2016 20:10:41 +0530 Subject: [PATCH] bitesizedtasks-large_frames-hw_dma_xilinx-hw_net_virtio --- hw/dma/xilinx_axidma.c | 5 ++++- hw/net/virtio-net.c | 11 ++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) size_t offset, i, guest_offset; @@ -1113,6 +1113,8 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t offset = i = 0; + mhdr_sg = (struct iovec *) malloc(VIRTQUEUE_MAX_SIZE * sizeof(struct iovec)); + while (offset < size) { VirtQueueElement *elem; int len, total; @@ -1122,6 +1124,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); if (!elem) { + free(mhdr_sg); if (i == 0) return -1; error_report("virtio-net unexpected empty queue: " @@ -1136,11 +1139,15 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t if (elem->in_num < 1) { error_report("virtio-net receive queue contains no in buffers"); + free(mhdr_sg); exit(1); } sg = elem->in_sg; if (i == 0) { + if (offset != 0) { + free(mhdr_sg); + } assert(offset == 0); if (n->mergeable_rx_bufs) { mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), @@ -1168,6 +1175,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t if (!n->mergeable_rx_bufs && offset < size) { virtqueue_discard(q->rx_vq, elem, total); g_free(elem); + free(mhdr_sg); return size; } @@ -1186,6 +1194,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t virtqueue_flush(q->rx_vq, i); virtio_notify(vdev, q->rx_vq); + free(mhdr_sg); return size; }