From patchwork Thu Sep 2 15:00:57 2010
X-Patchwork-Id: 149441
From: Alex Williamson <alex.williamson@redhat.com>
Subject: [PATCH v2 2/4] virtio-net: Limit number of packets sent per TX flush
To: qemu-devel@nongnu.org
Cc: kvm@vger.kernel.org, alex.williamson@redhat.com, anthony@codemonkey.ws,
    mst@redhat.com, chrisw@redhat.com, quintela@redhat.com,
    jes.sorensen@redhat.com
Date: Thu, 02 Sep 2010 09:00:57 -0600
Message-ID: <20100902150057.11862.4754.stgit@s20.home>
In-Reply-To: <20100902150041.11862.65901.stgit@s20.home>
References: <20100902150041.11862.65901.stgit@s20.home>
User-Agent: StGIT/0.14.3

diff --git a/hw/s390-virtio-bus.c b/hw/s390-virtio-bus.c
index d5cb24e..092e65f 100644
--- a/hw/s390-virtio-bus.c
+++ b/hw/s390-virtio-bus.c
@@ -330,6 +330,8 @@ static VirtIOS390DeviceInfo s390_virtio_net = {
         DEFINE_NIC_PROPERTIES(VirtIOS390Device, nic),
         DEFINE_PROP_UINT32("x-txtimer", VirtIOS390Device,
                            net.txtimer, TX_TIMER_INTERVAL),
+        DEFINE_PROP_INT32("x-txburst", VirtIOS390Device,
+                          net.txburst, TX_BURST),
         DEFINE_PROP_END_OF_LIST(),
     },
 };
diff --git a/hw/syborg_virtio.c b/hw/syborg_virtio.c
index 5665189..3c3f3b0 100644
--- a/hw/syborg_virtio.c
+++ b/hw/syborg_virtio.c
@@ -298,6 +298,8 @@ static SysBusDeviceInfo syborg_virtio_net_info = {
     DEFINE_VIRTIO_NET_FEATURES(SyborgVirtIOProxy, host_features),
     DEFINE_PROP_UINT32("x-txtimer", SyborgVirtIOProxy,
                        net.txtimer, TX_TIMER_INTERVAL),
+    DEFINE_PROP_INT32("x-txburst", SyborgVirtIOProxy,
+                      net.txburst, TX_BURST),
     DEFINE_PROP_END_OF_LIST(),
     }
 };
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index d5b03ab..55f3d94 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -37,6 +37,7 @@ typedef struct VirtIONet
     NICState *nic;
     QEMUTimer *tx_timer;
     uint32_t tx_timeout;
+    int32_t tx_burst;
     int tx_timer_active;
     uint32_t has_vnet_hdr;
     uint8_t has_ufo;
@@ -620,7 +621,7 @@ static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_
     return size;
 }
 
-static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);
+static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);
 
 static void virtio_net_tx_complete(VLANClientState *nc, ssize_t len)
 {
@@ -636,16 +637,18 @@ static void virtio_net_tx_complete(VLANClientState *nc, ssize_t len)
 }
 
 /* TX */
-static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
+static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 {
     VirtQueueElement elem;
+    int32_t num_packets = 0;
 
-    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
-        return;
+    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+        return num_packets;
+    }
 
     if (n->async_tx.elem.out_num) {
         virtio_queue_set_notification(n->tx_vq, 0);
-        return;
+        return num_packets;
     }
 
     while (virtqueue_pop(vq, &elem)) {
@@ -682,14 +685,19 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
             virtio_queue_set_notification(n->tx_vq, 0);
             n->async_tx.elem = elem;
             n->async_tx.len = len;
-            return;
+            return -EBUSY;
         }
 
         len += ret;
 
         virtqueue_push(vq, &elem, len);
         virtio_notify(&n->vdev, vq);
+
+        if (++num_packets >= n->tx_burst) {
+            break;
+        }
     }
+    return num_packets;
 }
 
 static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq)
@@ -934,6 +942,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
     n->tx_timer_active = 0;
     n->tx_timeout = net->txtimer;
+    n->tx_burst = net->txburst;
     n->mergeable_rx_bufs = 0;
     n->promisc = 1; /* for compatibility */
 
diff --git a/hw/virtio-net.h b/hw/virtio-net.h
index 46a2e1c..a2d1545 100644
--- a/hw/virtio-net.h
+++ b/hw/virtio-net.h
@@ -49,9 +49,17 @@
 
 #define TX_TIMER_INTERVAL 150000 /* 150 us */
 
+/* Limit the number of packets that can be sent via a single flush
+ * of the TX queue. This gives us a guaranteed exit condition and
+ * ensures fairness in the io path. 256 conveniently matches the
+ * length of the TX queue and shows a good balance of performance
+ * and latency. */
+#define TX_BURST 256
+
 typedef struct virtio_net_conf
 {
     uint32_t txtimer;
+    int32_t txburst;
 } virtio_net_conf;
 
 /* Maximum packet size we can receive from tap device: header + 64k */
diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index 1af48e2..3a5b3e6 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -693,6 +693,8 @@ static PCIDeviceInfo virtio_info[] = {
             DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
             DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy,
                                net.txtimer, TX_TIMER_INTERVAL),
+            DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy,
+                              net.txburst, TX_BURST),
             DEFINE_PROP_END_OF_LIST(),
         },
         .qdev.reset = virtio_pci_reset,
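
Since the limit is wired up as an experimental qdev property on each
transport, it should be tunable per device at creation time without
rebuilding. A hypothetical invocation for the PCI transport (the tap
backend and the value 128 are only illustrative; the default remains
TX_BURST):

    # illustrative only: the backend and burst value are assumptions;
    # the "x-txburst" property itself is what this patch adds
    qemu-system-x86_64 ... \
        -netdev tap,id=net0 \
        -device virtio-net-pci,netdev=net0,x-txburst=128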