From patchwork Wed Jan 7 10:42:37 2009
X-Patchwork-Submitter: Sheng Yang
X-Patchwork-Id: 1134
From: Sheng Yang <sheng@linux.intel.com>
To: Avi Kivity
Cc: Marcelo Tosatti, kvm@vger.kernel.org, Sheng Yang
Subject: [PATCH 01/10] KVM: Add a route layer to convert MSI message to GSI
Date: Wed, 7 Jan 2009 18:42:37 +0800
Message-Id: <1231324966-22286-2-git-send-email-sheng@linux.intel.com>
In-Reply-To: <1231324966-22286-1-git-send-email-sheng@linux.intel.com>
References: <1231324966-22286-1-git-send-email-sheng@linux.intel.com>
X-Mailing-List: kvm@vger.kernel.org

Avi's suggestion is to use a single kvm_set_irq() to deal with all interrupts,
including MSI, and this patch adds the route layer that makes that possible.
struct kvm_gsi_route_entry maps a special GSI (marked with KVM_GSI_ROUTE_MASK)
to an MSI/MSI-X message address/data pair, and the struct can be extended for
other purposes later. Up to 256 gsi_route_entry mappings are supported; the
GSI is allocated by the kernel, and two ioctls are provided to userspace,
which is more flexible.
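For illustration only (not part of this patch): userspace is expected to drive
the new ioctls roughly as in the sketch below. vm_fd and the MSI address/data
values are placeholders; the structures, KVM_GSI_ROUTE_MSI and
KVM_REQUEST_GSI_ROUTE come from the include/linux/kvm.h changes in this patch.

/*
 * Illustrative sketch: request one routed GSI for an MSI message through
 * KVM_REQUEST_GSI_ROUTE.  "vm_fd" is an already-created VM file descriptor;
 * the address/data values passed in are placeholders.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int request_msi_gsi(int vm_fd, __u32 addr_lo, __u32 addr_hi, __u32 data)
{
	struct kvm_gsi_route_entry_guest entry;
	struct kvm_gsi_route_guest route;

	memset(&entry, 0, sizeof(entry));
	entry.type = KVM_GSI_ROUTE_MSI;
	entry.msi.addr_lo = addr_lo;
	entry.msi.addr_hi = addr_hi;
	entry.msi.data = data;

	route.entries_nr = 1;
	route.entries = &entry;

	if (ioctl(vm_fd, KVM_REQUEST_GSI_ROUTE, &route) < 0)
		return -1;

	/* The kernel writes back the allocated GSI (KVM_GSI_ROUTE_MASK set). */
	return entry.gsi;
}

KVM_FREE_GSI_ROUTE takes the same struct kvm_gsi_route_guest and releases the
listed entries again.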
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 include/linux/kvm.h      |   26 +++++++++++
 include/linux/kvm_host.h |   20 +++++++++
 virt/kvm/irq_comm.c      |   70 ++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      |  106 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 222 insertions(+), 0 deletions(-)

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 71c150f..bbefce6 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -399,6 +399,9 @@ struct kvm_trace_rec {
 #if defined(CONFIG_X86)
 #define KVM_CAP_REINJECT_CONTROL 24
 #endif
+#if defined(CONFIG_X86)
+#define KVM_CAP_GSI_ROUTE 25
+#endif
 
 /*
  * ioctls for VM fds
@@ -433,6 +436,8 @@ struct kvm_trace_rec {
 #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
 			struct kvm_assigned_irq)
 #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
+#define KVM_REQUEST_GSI_ROUTE _IOWR(KVMIO, 0x72, void *)
+#define KVM_FREE_GSI_ROUTE _IOR(KVMIO, 0x73, void *)
 
 /*
  * ioctls for vcpu fds
@@ -553,4 +558,25 @@ struct kvm_assigned_irq {
 #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
 #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0)
 
+struct kvm_gsi_route_guest {
+	__u32 entries_nr;
+	struct kvm_gsi_route_entry_guest *entries;
+};
+
+#define KVM_GSI_ROUTE_MSI (1 << 0)
+struct kvm_gsi_route_entry_guest {
+	__u32 gsi;
+	__u32 type;
+	__u32 flags;
+	__u32 reserved;
+	union {
+		struct {
+			__u32 addr_lo;
+			__u32 addr_hi;
+			__u32 data;
+		} msi;
+		__u32 padding[8];
+	};
+};
+
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a8bcad0..6a00201 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -136,6 +136,9 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
+	struct hlist_head gsi_route_list;
+#define KVM_NR_GSI_ROUTE_ENTRIES 256
+	DECLARE_BITMAP(gsi_route_bitmap, KVM_NR_GSI_ROUTE_ENTRIES);
 };
 
 /* The guest did something we don't support. */
@@ -336,6 +339,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn);
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
+#define KVM_GSI_ROUTE_MASK 0x1000000ull
+struct kvm_gsi_route_entry {
+	u32 gsi;
+	u32 type;
+	u32 flags;
+	u32 reserved;
+	union {
+		struct msi_msg msi;
+		u32 reserved[8];
+	};
+	struct hlist_node link;
+};
+
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -343,6 +359,10 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
+struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi);
+void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
+void kvm_free_gsi_route_list(struct kvm *kvm);
 
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 5162a41..7460e7f 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -123,3 +123,73 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 			kimn->func(kimn, mask);
 }
 
+int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
+{
+	struct kvm_gsi_route_entry *found_entry, *new_entry;
+	int r, gsi;
+
+	mutex_lock(&kvm->lock);
+	/* Find whether we need a update or a new entry */
+	found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
+	if (found_entry)
+		*found_entry = *entry;
+	else {
+		gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
+					  KVM_NR_GSI_ROUTE_ENTRIES);
+		if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
+			r = -ENOSPC;
+			goto out;
+		}
+		__set_bit(gsi, kvm->gsi_route_bitmap);
+		entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
+		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+		if (!new_entry) {
+			r = -ENOMEM;
+			goto out;
+		}
+		*new_entry = *entry;
+		hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
+	}
+	r = 0;
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+/* Call with kvm->lock hold */
+struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi)
+{
+	struct kvm_gsi_route_entry *entry;
+	struct hlist_node *n;
+
+	if (!(gsi & KVM_GSI_ROUTE_MASK))
+		return NULL;
+	hlist_for_each_entry(entry, n, &kvm->gsi_route_list, link)
+		if (entry->gsi == gsi)
+			goto out;
+	entry = NULL;
+out:
+	return entry;
+}
+
+/* Call with kvm->lock hold */
+void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
+{
+	if (!entry)
+		return;
+	__clear_bit(entry->gsi & ~KVM_GSI_ROUTE_MASK, kvm->gsi_route_bitmap);
+	hlist_del(&entry->link);
+	kfree(entry);
+}
+
+void kvm_free_gsi_route_list(struct kvm *kvm)
+{
+	struct kvm_gsi_route_entry *entry;
+	struct hlist_node *pos, *n;
+
+	mutex_lock(&kvm->lock);
+	hlist_for_each_entry_safe(entry, pos, n, &kvm->gsi_route_list, link)
+		kvm_free_gsi_route(kvm, entry);
+	mutex_unlock(&kvm->lock);
+}
+
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 61688a6..bc1a27b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -839,6 +839,7 @@ static struct kvm *kvm_create_vm(void)
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	kvm_coalesced_mmio_init(kvm);
 #endif
+	INIT_HLIST_HEAD(&kvm->gsi_route_list);
 out:
 	return kvm;
 }
@@ -877,6 +878,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	struct mm_struct *mm = kvm->mm;
 
 	kvm_arch_sync_events(kvm);
+	kvm_free_gsi_route_list(kvm);
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
@@ -1605,6 +1607,47 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 	return 0;
 }
 
+static int kvm_vm_ioctl_request_gsi_route(struct kvm *kvm,
+		struct kvm_gsi_route_guest *gsi_route,
+		struct kvm_gsi_route_entry_guest *guest_entries)
+{
+	struct kvm_gsi_route_entry entry;
+	int r, i;
+
+	for (i = 0; i < gsi_route->entries_nr; i++) {
+		memcpy(&entry, &guest_entries[i], sizeof(entry));
+		r = kvm_update_gsi_route(kvm, &entry);
+		if (r == 0)
+			guest_entries[i].gsi = entry.gsi;
+		else
+			break;
+	}
+	return r;
+}
+
+static int kvm_vm_ioctl_free_gsi_route(struct kvm *kvm,
+		struct kvm_gsi_route_guest *gsi_route,
+		struct kvm_gsi_route_entry_guest *guest_entries)
+{
+	struct kvm_gsi_route_entry *entry;
+	int r, i;
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < gsi_route->entries_nr; i++) {
+		entry = kvm_find_gsi_route_entry(kvm, guest_entries[i].gsi);
+		if (!entry ||
+		    memcmp(entry, &guest_entries[i], sizeof(*entry)) != 0) {
+			printk(KERN_WARNING "kvm: illegal gsi mapping!");
+			r = -EINVAL;
+			goto out;
+		}
+		kvm_free_gsi_route(kvm, entry);
+	}
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -1803,6 +1846,7 @@ static long kvm_vm_ioctl(struct file *filp,
 {
 	struct kvm *kvm = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	struct kvm_gsi_route_entry_guest *gsi_entries = NULL;
 	int r;
 
 	if (kvm->mm != current->mm)
@@ -1887,10 +1931,72 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
+	case KVM_REQUEST_GSI_ROUTE: {
+		struct kvm_gsi_route_guest gsi_route;
+		r = copy_from_user(&gsi_route, argp, sizeof gsi_route);
+		if (r)
+			goto out;
+		if (gsi_route.entries_nr == 0) {
+			r = -EFAULT;
+			goto out;
+		}
+		gsi_entries = kmalloc(gsi_route.entries_nr *
+				sizeof(struct kvm_gsi_route_entry_guest),
+				GFP_KERNEL);
+		if (!gsi_entries) {
+			r = -ENOMEM;
+			goto out;
+		}
+		r = copy_from_user(gsi_entries,
+				(void __user *)gsi_route.entries,
+				gsi_route.entries_nr *
+				sizeof(struct kvm_gsi_route_entry_guest));
+		if (r)
+			goto out;
+		r = kvm_vm_ioctl_request_gsi_route(kvm, &gsi_route,
+				gsi_entries);
+		if (r)
+			goto out;
+		r = copy_to_user((void __user *)gsi_route.entries,
+				gsi_entries,
+				gsi_route.entries_nr *
+				sizeof(struct kvm_gsi_route_entry_guest));
+		if (r)
+			goto out;
+		break;
+	}
+	case KVM_FREE_GSI_ROUTE: {
+		struct kvm_gsi_route_guest gsi_route;
+		r = copy_from_user(&gsi_route, argp, sizeof gsi_route);
+		if (r)
+			goto out;
+		if (gsi_route.entries_nr == 0) {
+			r = -EFAULT;
+			goto out;
+		}
+		gsi_entries = kmalloc(gsi_route.entries_nr *
+				sizeof(struct kvm_gsi_route_entry_guest),
+				GFP_KERNEL);
+		if (!gsi_entries) {
+			r = -ENOMEM;
+			goto out;
+		}
+		r = copy_from_user(gsi_entries,
+				(void __user *)gsi_route.entries,
+				gsi_route.entries_nr *
+				sizeof(struct kvm_gsi_route_entry_guest));
+		if (r)
+			goto out;
+		r = kvm_vm_ioctl_free_gsi_route(kvm, &gsi_route, gsi_entries);
+		if (r)
+			goto out;
+		break;
+	}
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
 out:
+	kfree(gsi_entries);
 	return r;
 }
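Not introduced by this patch, but to show the intended use of the lookup side:
once a later patch teaches the interrupt injection path about routed GSIs,
recovering the registered MSI message is expected to look roughly like the
sketch below. The function itself is hypothetical; only
kvm_find_gsi_route_entry(), KVM_GSI_ROUTE_MASK and the structures come from
this patch.

/*
 * Illustrative sketch only: translate a routed GSI back into the MSI
 * message registered via KVM_REQUEST_GSI_ROUTE.  The caller must hold
 * kvm->lock, as kvm_find_gsi_route_entry() requires.
 */
static void example_handle_routed_gsi(struct kvm *kvm, u32 gsi, int level)
{
	struct kvm_gsi_route_entry *entry;

	if (!(gsi & KVM_GSI_ROUTE_MASK) || !level)
		return;	/* plain GSI: normal ioapic/pic pin handling instead */

	entry = kvm_find_gsi_route_entry(kvm, gsi);
	if (entry && (entry->type & KVM_GSI_ROUTE_MSI)) {
		/*
		 * entry->msi.address_lo/address_hi/data hold the MSI message;
		 * a real implementation would deliver it via the local APIC.
		 */
	}
}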