From patchwork Tue Jan 8 18:42:38 2013
X-Patchwork-Submitter: Christoffer Dall
X-Patchwork-Id: 1947351
Subject: [PATCH v5 11/12] ARM: KVM: VGIC initialisation code
To: kvm@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
 kvmarm@lists.cs.columbia.edu
From: Christoffer Dall
Cc: Marc Zyngier
Date: Tue, 08 Jan 2013 13:42:38 -0500
Message-ID: <20130108184238.46558.95837.stgit@ubuntu>
In-Reply-To: <20130108184116.46558.3558.stgit@ubuntu>
References: <20130108184116.46558.3558.stgit@ubuntu>
User-Agent: StGit/0.15

From: Marc Zyngier

Add the init code for the hypervisor, the virtual machine, and the
virtual CPUs.

An interrupt handler is also wired up to handle the VGIC maintenance
interrupt, which is used to deal with level-triggered interrupts and
LR underflows. A CPU hotplug notifier is registered to enable/disable
this per-CPU interrupt as CPUs come online and go offline.
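For context, the sketch below shows a hypothetical userspace (VMM) sequence
that exercises the new init paths; it is not part of this patch. The
KVM_ARM_SET_DEVICE_ADDR ioctl, the KVM_ARM_DEVICE_VGIC_V2 /
KVM_VGIC_V2_ADDR_TYPE_* identifiers and the guest-physical addresses used
here are assumptions borrowed from the API as it was eventually merged and
may not match this series exactly.

/*
 * Hypothetical VMM-side sketch (not part of this patch): the ioctl
 * sequence that drives the init code added below.  Names and addresses
 * are assumptions taken from the merged API and may differ in this series.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int setup_vgic(int vm_fd)
{
	struct kvm_arm_device_addr dist_addr = {
		.id   = ((__u64)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
			KVM_VGIC_V2_ADDR_TYPE_DIST,
		.addr = 0x2c001000,	/* example distributor base in guest PA space */
	};
	struct kvm_arm_device_addr cpu_addr = {
		.id   = ((__u64)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
			KVM_VGIC_V2_ADDR_TYPE_CPU,
		.addr = 0x2c002000,	/* example CPU interface base in guest PA space */
	};

	/* Ends up in kvm_vgic_create(); must happen before any vcpu exists. */
	if (ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0) < 0)
		return -1;

	/*
	 * Ends up in kvm_vgic_set_addr(); both bases must be set before the
	 * first KVM_RUN, otherwise kvm_vgic_init() fails with -ENXIO.
	 */
	if (ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dist_addr) < 0 ||
	    ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &cpu_addr) < 0)
		return -1;

	/*
	 * kvm_vgic_init() itself runs lazily in the kernel on the first
	 * KVM_RUN, as added to kvm_arch_vcpu_ioctl_run() in this patch.
	 */
	return 0;
}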
Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/kvm_vgic.h |   11 ++
 arch/arm/kvm/arm.c              |   15 +++
 arch/arm/kvm/vgic.c             |  223 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 249 insertions(+)

diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index 9ff0e52..5e81e28 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -71,6 +71,7 @@ struct vgic_bytemap {
 struct vgic_dist {
 #ifdef CONFIG_KVM_ARM_VGIC
 	spinlock_t		lock;
+	bool			ready;
 
 	/* Virtual control interface mapping */
 	void __iomem		*vctrl_base;
@@ -144,6 +145,10 @@ struct kvm_exit_mmio;
 
 #ifdef CONFIG_KVM_ARM_VGIC
 int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr);
+int kvm_vgic_hyp_init(void);
+int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm);
+int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_to_cpu(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_from_cpu(struct kvm_vcpu *vcpu);
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
@@ -153,6 +158,7 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		      struct kvm_exit_mmio *mmio);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.vctrl_base))
+#define vgic_initialized(k)	((k)->arch.vgic.ready)
 
 #else
 static inline int kvm_vgic_hyp_init(void)
@@ -204,6 +210,11 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
 {
 	return 0;
 }
+
+static inline bool vgic_initialized(struct kvm *kvm)
+{
+	return true;
+}
 #endif
 
 #endif
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 8dd949c..ac72a8f 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -659,6 +659,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (unlikely(vcpu->arch.target < 0))
 		return -ENOEXEC;
 
+	/*
+	 * Initialize the VGIC before running a vcpu the first time on
+	 * this VM.
+	 */
+	if (irqchip_in_kernel(vcpu->kvm) &&
+	    unlikely(!vgic_initialized(vcpu->kvm))) {
+		ret = kvm_vgic_init(vcpu->kvm);
+		if (ret)
+			return ret;
+	}
+
 	if (run->exit_reason == KVM_EXIT_MMIO) {
 		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 		if (ret)
@@ -1066,6 +1077,10 @@ static int init_hyp_mode(void)
 	if (err)
 		goto out_free_vfp;
 
+#ifdef CONFIG_KVM_ARM_VGIC
+	vgic_present = true;
+#endif
+
 	kvm_info("Hyp mode initialized successfully\n");
 	return 0;
 out_free_vfp:
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 65e5282..083639b 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -16,11 +16,19 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include 
 #include 
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
+
 #include 
+#include 
+#include 
+#include 
 
 /*
  * How the whole thing works (courtesy of Christoffer Dall):
@@ -62,6 +70,14 @@
 #define VGIC_ADDR_UNDEF		(-1)
 #define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)
 
+/* Physical address of vgic virtual cpu interface */
+static phys_addr_t vgic_vcpu_base;
+
+/* Virtual control interface base address */
+static void __iomem *vgic_vctrl_base;
+
+static struct device_node *vgic_node;
+
 #define ACCESS_READ_VALUE	(1 << 0)
 #define ACCESS_READ_RAZ		(0 << 0)
 #define ACCESS_READ_MASK(x)	((x) & (1 << 0))
@@ -75,6 +91,9 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_update_state(struct kvm *kvm);
 static void vgic_kick_vcpus(struct kvm *kvm);
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+static u32 vgic_nr_lr;
+
+static unsigned int vgic_maint_irq;
 
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
 				int cpuid, u32 offset)
@@ -1209,6 +1228,210 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 	return 0;
 }
 
+static irqreturn_t vgic_maintenance_handler(int irq, void *data)
+{
+	/*
+	 * We cannot rely on the vgic maintenance interrupt to be
+	 * delivered synchronously. This means we can only use it to
+	 * exit the VM, and we perform the handling of EOIed
+	 * interrupts on the exit path (see vgic_process_maintenance).
+	 */
+	return IRQ_HANDLED;
+}
+
+int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int i;
+
+	if (!irqchip_in_kernel(vcpu->kvm))
+		return 0;
+
+	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
+		return -EBUSY;
+
+	for (i = 0; i < VGIC_NR_IRQS; i++) {
+		if (i < VGIC_NR_PPIS)
+			vgic_bitmap_set_irq_val(&dist->irq_enabled,
+						vcpu->vcpu_id, i, 1);
+		if (i < VGIC_NR_PRIVATE_IRQS)
+			vgic_bitmap_set_irq_val(&dist->irq_cfg,
+						vcpu->vcpu_id, i, VGIC_CFG_EDGE);
+
+		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
+	}
+
+	/*
+	 * By forcing VMCR to zero, the GIC will restore the binary
+	 * points to their reset values. Anything else resets to zero
+	 * anyway.
+	 */
+	vgic_cpu->vgic_vmcr = 0;
+
+	vgic_cpu->nr_lr = vgic_nr_lr;
+	vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
+
+	return 0;
+}
+
+static void vgic_init_maintenance_interrupt(void *info)
+{
+	enable_percpu_irq(vgic_maint_irq, 0);
+}
+
+static int vgic_cpu_notify(struct notifier_block *self,
+			   unsigned long action, void *cpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		vgic_init_maintenance_interrupt(NULL);
+		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		disable_percpu_irq(vgic_maint_irq);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block vgic_cpu_nb = {
+	.notifier_call = vgic_cpu_notify,
+};
+
+int kvm_vgic_hyp_init(void)
+{
+	int ret;
+	struct resource vctrl_res;
+	struct resource vcpu_res;
+
+	vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
+	if (!vgic_node) {
+		kvm_err("error: no compatible vgic node in DT\n");
+		return -ENODEV;
+	}
+
+	vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
+	if (!vgic_maint_irq) {
+		kvm_err("error getting vgic maintenance irq from DT\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
+				 "vgic", kvm_get_running_vcpus());
+	if (ret) {
+		kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
+		goto out;
+	}
+
+	ret = register_cpu_notifier(&vgic_cpu_nb);
+	if (ret) {
+		kvm_err("Cannot register vgic CPU notifier\n");
+		goto out_free_irq;
+	}
+
+	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
+	if (ret) {
+		kvm_err("Cannot obtain VCTRL resource\n");
+		goto out_free_irq;
+	}
+
+	vgic_vctrl_base = of_iomap(vgic_node, 2);
+	if (!vgic_vctrl_base) {
+		kvm_err("Cannot ioremap VCTRL\n");
+		ret = -ENOMEM;
+		goto out_free_irq;
+	}
+
+	vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
+	vgic_nr_lr = (vgic_nr_lr & 0x1f) + 1;
+
+	ret = create_hyp_io_mappings(vgic_vctrl_base,
+				     vgic_vctrl_base + resource_size(&vctrl_res),
+				     vctrl_res.start);
+	if (ret) {
+		kvm_err("Cannot map VCTRL into hyp\n");
+		goto out_unmap;
+	}
+
+	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+		 vctrl_res.start, vgic_maint_irq);
+	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
+	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
+		kvm_err("Cannot obtain VCPU resource\n");
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+	vgic_vcpu_base = vcpu_res.start;
+
+	goto out;
+
+out_unmap:
+	iounmap(vgic_vctrl_base);
+out_free_irq:
+	free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
+out:
+	of_node_put(vgic_node);
+	return ret;
+}
+
+int kvm_vgic_init(struct kvm *kvm)
+{
+	int ret = 0, i;
+
+	mutex_lock(&kvm->lock);
+
+	if (vgic_initialized(kvm))
+		goto out;
+
+	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
+	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
+		kvm_err("Need to set vgic cpu and dist addresses first\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
+				    vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
+	if (ret) {
+		kvm_err("Unable to remap VGIC CPU to VCPU\n");
+		goto out;
+	}
+
+	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
+		vgic_set_target_reg(kvm, 0, i);
+
+	kvm->arch.vgic.ready = true;
+out:
+	mutex_unlock(&kvm->lock);
+	return ret;
+}
+
+int kvm_vgic_create(struct kvm *kvm)
+{
+	int ret = 0;
+
+	mutex_lock(&kvm->lock);
+
+	if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
+		ret = -EEXIST;
+		goto out;
+	}
+
+	spin_lock_init(&kvm->arch.vgic.lock);
+	kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
+	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+
+out:
+	mutex_unlock(&kvm->lock);
+	return ret;
+}
+
 static bool vgic_ioaddr_overlap(struct kvm *kvm)
 {
 	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;