
[v3,1/2] KVM: MMIO: Lock coalesced device when checking for available entry

Message ID 1310998635-31608-1-git-send-email-levinsasha928@gmail.com (mailing list archive)
State New, archived

Commit Message

Sasha Levin July 18, 2011, 2:17 p.m. UTC
Move the check for available ring entries under the spinlock.
This allows working with a larger number of VCPUs and reduces
premature exits when many VCPUs are in use.

Cc: Avi Kivity <avi@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 virt/kvm/coalesced_mmio.c |   42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)
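
For context, the ring this patch guards is shared with userspace: the kernel
producer advances `last' as it appends entries, and the VMM consumer advances
`first' as it drains them, which is why one slot always stays unused to tell
a full ring from an empty one. A minimal sketch of the consumer side, assuming
the struct kvm_coalesced_mmio_ring layout from the KVM UAPI header (note that
KVM_COALESCED_MMIO_MAX there expands in terms of PAGE_SIZE, which userspace
must define) and a hypothetical handle_mmio_write() dispatch helper:

#include <linux/kvm.h>

/* Hypothetical VMM dispatch routine for a drained entry. */
extern void handle_mmio_write(__u64 addr, const __u8 *data, __u32 len);

/* Drain all pending entries from the coalesced MMIO ring.
 * first == last means the ring is empty. */
static void drain_coalesced_mmio(struct kvm_coalesced_mmio_ring *ring)
{
	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *ent =
			&ring->coalesced_mmio[ring->first];

		handle_mmio_write(ent->phys_addr, ent->data, ent->len);

		/* make sure the entry is fully consumed before the
		 * slot is handed back to the kernel-side producer */
		__sync_synchronize();
		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
	}
}

This also explains the shape of the patch below: once the capacity check runs
under dev->lock, kernel-side writers are serialized, so a single free slot
(the avail == 0 test) suffices instead of the KVM_MAX_VCPUS headroom the
unlocked check had to reserve.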

Comments

Avi Kivity July 18, 2011, 2:24 p.m. UTC | #1
On 07/18/2011 05:17 PM, Sasha Levin wrote:
> Move the check for available ring entries under the spinlock.
> This allows working with a larger number of VCPUs and reduces
> premature exits when many VCPUs are in use.

Looks good, with the fixed kvm@ address too.
Marcelo Tosatti July 19, 2011, 4:02 p.m. UTC | #2
On Mon, Jul 18, 2011 at 05:17:14PM +0300, Sasha Levin wrote:
> Move the check for available ring entries under the spinlock.
> This allows working with a larger number of VCPUs and reduces
> premature exits when many VCPUs are in use.

Applied both, thanks.

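The zones scanned by coalesced_mmio_in_range() in the patch below are
registered up front by the VMM. A sketch of the registration call, assuming
vm_fd is an already-created KVM VM descriptor and using an illustrative
guest-physical range:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register an illustrative MMIO range for coalescing: guest writes
 * that land in this zone are batched in the ring instead of causing
 * an immediate exit to userspace. */
int register_zone(int vm_fd)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = 0xfed00000,	/* example guest-physical base */
		.size = 0x1000,		/* length of the zone in bytes */
	};

	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0) {
		perror("KVM_REGISTER_COALESCED_MMIO");
		return -1;
	}
	return 0;
}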

Patch

diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index fc84875..ae075dc 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -25,23 +25,8 @@  static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 				   gpa_t addr, int len)
 {
 	struct kvm_coalesced_mmio_zone *zone;
-	struct kvm_coalesced_mmio_ring *ring;
-	unsigned avail;
 	int i;
 
-	/* Are we able to batch it ? */
-
-	/* last is the first free entry
-	 * check if we don't meet the first used entry
-	 * there is always one unused entry in the buffer
-	 */
-	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < KVM_MAX_VCPUS) {
-		/* full */
-		return 0;
-	}
-
 	/* is it in a batchable area ? */
 
 	for (i = 0; i < dev->nb_zones; i++) {
@@ -58,16 +43,43 @@  static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 0;
 }
 
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+{
+	struct kvm_coalesced_mmio_ring *ring;
+	unsigned avail;
+
+	/* Are we able to batch it ? */
+
+	/* last is the first free entry
+	 * check if we don't meet the first used entry
+	 * there is always one unused entry in the buffer
+	 */
+	ring = dev->kvm->coalesced_mmio_ring;
+	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	if (avail == 0) {
+		/* full */
+		return 0;
+	}
+
+	return 1;
+}
+
 static int coalesced_mmio_write(struct kvm_io_device *this,
 				gpa_t addr, int len, const void *val)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->lock);
 
+	if (!coalesced_mmio_has_room(dev)) {
+		spin_unlock(&dev->lock);
+		return -EOPNOTSUPP;
+	}
+
 	/* copy data in first free entry of the ring */
 
 	ring->coalesced_mmio[ring->last].phys_addr = addr;