From patchwork Thu Jul  5 15:16:25 2012
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Paolo Bonzini
X-Patchwork-Id: 1161231
From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: avi@redhat.com, mtosatti@redhat.com, kvm@vger.kernel.org,
	anthony.perard@citrix.com, jan.kiszka@siemens.com, mst@redhat.com,
	stefano.stabellini@eu.citrix.com
Subject: [PATCH uq/master 4/9] ivshmem: use EventNotifier and memory API
Date: Thu,  5 Jul 2012 17:16:25 +0200
Message-Id: <1341501390-797-5-git-send-email-pbonzini@redhat.com>
X-Mailer: git-send-email 1.7.10.2
In-Reply-To: <1341501390-797-1-git-send-email-pbonzini@redhat.com>
References: <1341501390-797-1-git-send-email-pbonzini@redhat.com>
X-Mailing-List: kvm@vger.kernel.org

All of ivshmem's usage of eventfd now has a corresponding API in
EventNotifier.  Simplify the code by using it, and also use the
memory API consistently to set up and tear down the ioeventfds.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 hw/ivshmem.c | 63 ++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 35 insertions(+), 28 deletions(-)

diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index 05559b6..3cdbea2 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -23,6 +23,7 @@
 #include "kvm.h"
 #include "migration.h"
 #include "qerror.h"
+#include "event_notifier.h"
 
 #include <sys/mman.h>
 #include <sys/types.h>
@@ -45,7 +46,7 @@
 
 typedef struct Peer {
     int nb_eventfds;
-    int *eventfds;
+    EventNotifier *eventfds;
 } Peer;
 
 typedef struct EventfdEntry {
@@ -63,7 +64,6 @@ typedef struct IVShmemState {
     CharDriverState *server_chr;
     MemoryRegion ivshmem_mmio;
 
-    pcibus_t mmio_addr;
     /* We might need to register the BAR before we actually have the memory.
      * So prepare a container MemoryRegion for the BAR immediately and
      * add a subregion when we have the memory.
@@ -168,7 +168,6 @@ static void ivshmem_io_write(void *opaque, target_phys_addr_t addr,
 {
     IVShmemState *s = opaque;
 
-    uint64_t write_one = 1;
     uint16_t dest = val >> 16;
     uint16_t vector = val & 0xff;
 
@@ -194,12 +193,8 @@ static void ivshmem_io_write(void *opaque, target_phys_addr_t addr,
 
             /* check doorbell range */
             if (vector < s->peers[dest].nb_eventfds) {
-                IVSHMEM_DPRINTF("Writing %" PRId64 " to VM %d on vector %d\n",
-                                write_one, dest, vector);
-                if (write(s->peers[dest].eventfds[vector],
-                          &(write_one), 8) != 8) {
-                    IVSHMEM_DPRINTF("error writing to eventfd\n");
-                }
+                IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
+                event_notifier_set(&s->peers[dest].eventfds[vector]);
             }
             break;
         default:
@@ -279,12 +274,13 @@ static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
     msix_notify(pdev, entry->vector);
 }
 
-static CharDriverState* create_eventfd_chr_device(void * opaque, int eventfd,
-                                                  int vector)
+static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
+                                                  int vector)
 {
     /* create a event character device based on the passed eventfd */
     IVShmemState *s = opaque;
     CharDriverState * chr;
+    int eventfd = event_notifier_get_fd(n);
 
     chr = qemu_chr_open_eventfd(eventfd);
 
@@ -347,6 +343,26 @@ static void create_shared_memory_BAR(IVShmemState *s, int fd) {
     pci_register_bar(&s->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
 }
 
+static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
+{
+    memory_region_add_eventfd(&s->ivshmem_mmio,
+                              DOORBELL,
+                              4,
+                              true,
+                              (posn << 16) | i,
+                              event_notifier_get_fd(&s->peers[posn].eventfds[i]));
+}
+
+static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
+{
+    memory_region_del_eventfd(&s->ivshmem_mmio,
+                              DOORBELL,
+                              4,
+                              true,
+                              (posn << 16) | i,
+                              event_notifier_get_fd(&s->peers[posn].eventfds[i]));
+}
+
 static void close_guest_eventfds(IVShmemState *s, int posn)
 {
     int i, guest_curr_max;
@@ -354,9 +370,8 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
     guest_curr_max = s->peers[posn].nb_eventfds;
 
     for (i = 0; i < guest_curr_max; i++) {
-        kvm_set_ioeventfd_mmio(s->peers[posn].eventfds[i],
-                    s->mmio_addr + DOORBELL, (posn << 16) | i, 0, 4);
-        close(s->peers[posn].eventfds[i]);
+        ivshmem_del_eventfd(s, posn, i);
+        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
     }
 
     g_free(s->peers[posn].eventfds);
@@ -369,12 +384,7 @@ static void setup_ioeventfds(IVShmemState *s) {
 
     for (i = 0; i <= s->max_peer; i++) {
         for (j = 0; j < s->peers[i].nb_eventfds; j++) {
-            memory_region_add_eventfd(&s->ivshmem_mmio,
-                                      DOORBELL,
-                                      4,
-                                      true,
-                                      (i << 16) | j,
-                                      s->peers[i].eventfds[j]);
+            ivshmem_add_eventfd(s, i, j);
         }
     }
 }
@@ -476,14 +486,14 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
 
         if (guest_max_eventfd == 0) {
             /* one eventfd per MSI vector */
-            s->peers[incoming_posn].eventfds = (int *) g_malloc(s->vectors *
-                                                                sizeof(int));
+            s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
         }
 
         /* this is an eventfd for a particular guest VM */
         IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
                                             guest_max_eventfd, incoming_fd);
-        s->peers[incoming_posn].eventfds[guest_max_eventfd] = incoming_fd;
+        event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
+                               incoming_fd);
 
         /* increment count for particular guest */
         s->peers[incoming_posn].nb_eventfds++;
@@ -495,15 +505,12 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
 
         if (incoming_posn == s->vm_id) {
             s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
-                       s->peers[s->vm_id].eventfds[guest_max_eventfd],
+                       &s->peers[s->vm_id].eventfds[guest_max_eventfd],
                        guest_max_eventfd);
         }
 
         if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
-            if (kvm_set_ioeventfd_mmio(incoming_fd, s->mmio_addr + DOORBELL,
-                    (incoming_posn << 16) | guest_max_eventfd, 1, 4) < 0) {
-                fprintf(stderr, "ivshmem: ioeventfd not available\n");
-            }
+            ivshmem_add_eventfd(s, incoming_posn, guest_max_eventfd);
         }
 
         return;
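
For readers unfamiliar with the API this patch adopts: EventNotifier is QEMU's
thin wrapper around a Linux eventfd, and event_notifier_set() performs the same
8-byte write that the removed open-coded path did by hand.  Below is a minimal
standalone sketch of the underlying mechanics.  It is plain POSIX code, not
QEMU code, and the EventNotifier names in the comments only indicate which
wrapper call each step corresponds to on Linux:

/* Standalone illustration of the eventfd semantics that EventNotifier
 * wraps.  Build with: cc -o notify notify.c  (Linux only). */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    /* Roughly what event_notifier_init() sets up on Linux. */
    int fd = eventfd(0, 0);
    if (fd < 0) {
        perror("eventfd");
        return 1;
    }

    uint64_t one = 1;
    /* event_notifier_set(): the old ivshmem doorbell path open-coded
     * exactly this write(fd, &write_one, 8). */
    if (write(fd, &one, sizeof(one)) != sizeof(one)) {
        perror("write");
    }

    uint64_t count;
    /* The consumer (KVM via ioeventfd, or a poll loop) reads to clear;
     * the read returns the number of accumulated signals. */
    if (read(fd, &count, sizeof(count)) == sizeof(count)) {
        printf("notifier fired %" PRIu64 " time(s)\n", count);
    }

    /* event_notifier_cleanup(): releases the descriptor. */
    close(fd);
    return 0;
}

The memory API side of the change follows the same logic: with
memory_region_add_eventfd() the ioeventfd is registered at an offset within
the ivshmem_mmio region rather than at an absolute guest-physical address,
so the device no longer has to track where its BAR is mapped, which is why
the cached mmio_addr field can be deleted.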