From patchwork Thu Jun 19 09:45:46 2014
X-Patchwork-Submitter: Andre Przywara
X-Patchwork-Id: 4382001
From: Andre Przywara <andre.przywara@arm.com>
To: linux-arm-kernel@lists.infradead.org, kvmarm@lists.cs.columbia.edu,
	kvm@vger.kernel.org
Cc: marc.zyngier@arm.com, christoffer.dall@linaro.org
Subject: [PATCH 08/14] arm/arm64: KVM: refactor MMIO accessors
Date: Thu, 19 Jun 2014 10:45:46 +0100
Message-Id: <1403171152-24067-9-git-send-email-andre.przywara@arm.com>
In-Reply-To: <1403171152-24067-1-git-send-email-andre.przywara@arm.com>
References: <1403171152-24067-1-git-send-email-andre.przywara@arm.com>

The MMIO accessors for GICD_I[CS]ENABLER, GICD_I[CS]PENDR and GICD_ICFGR
behave very similarly in GICv3, although the way the affected vCPU is
determined differs. Factor out a generic implementation that backends can
share, and reduce the current GICv2 handlers to thin wrappers around it,
to ease code sharing later.
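To make the handler/wrapper split concrete before reading the diff: the
common body takes the access mode (set vs. clear) and the target vCPU as
parameters, and each per-register handler collapses to a one-line wrapper.
Below is a minimal, self-contained sketch of that pattern; the types and
names (struct mmio, handle_bitmap_reg and friends) are simplified stand-ins
for illustration, not the kernel's API.

#include <stdio.h>

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(1 << 2)

/* Simplified stand-in for struct kvm_exit_mmio. */
struct mmio {
	int is_write;
	unsigned int value;
};

/* Generic body, parameterised by the access mode. */
static int handle_bitmap_reg(unsigned int *reg, struct mmio *mmio, int mode)
{
	if (!mmio->is_write)
		return 0;		/* reads need no follow-up work */
	if (mode & ACCESS_WRITE_SETBIT)
		*reg |= mmio->value;	/* GICD_ISENABLER-style write */
	if (mode & ACCESS_WRITE_CLEARBIT)
		*reg &= ~mmio->value;	/* GICD_ICENABLER-style write */
	return 1;			/* caller must refresh distributor state */
}

/* Thin wrappers pick the mode; a GICv3 backend would add its own pair. */
static int handle_set_enable(unsigned int *reg, struct mmio *mmio)
{
	return handle_bitmap_reg(reg, mmio,
				 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
}

static int handle_clear_enable(unsigned int *reg, struct mmio *mmio)
{
	return handle_bitmap_reg(reg, mmio,
				 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
}

int main(void)
{
	unsigned int enabled = 0;
	struct mmio write = { .is_write = 1, .value = 0x3 };

	handle_set_enable(&enabled, &write);
	printf("after set:   %#x\n", enabled);	/* prints 0x3 */
	handle_clear_enable(&enabled, &write);
	printf("after clear: %#x\n", enabled);	/* prints 0 */
	return 0;
}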
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 virt/kvm/arm/vgic.c | 93 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 52 insertions(+), 41 deletions(-)

diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 2de58b3..2a59dff 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -398,35 +398,54 @@ static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
 	return false;
 }
 
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
-				       struct kvm_exit_mmio *mmio,
-				       phys_addr_t offset)
+static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+				   phys_addr_t offset, int vcpu_id, int access)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
-				       vcpu->vcpu_id, offset);
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	u32 *reg;
+	int mode = ACCESS_READ_VALUE | access;
+	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
+
+	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset, mode);
 	if (mmio->is_write) {
-		vgic_update_state(vcpu->kvm);
+		if (access & ACCESS_WRITE_CLEARBIT) {
+			if (offset < 4) /* Force SGI enabled */
+				*reg |= 0xffff;
+			vgic_retire_disabled_irqs(target_vcpu);
+		}
+		vgic_update_state(kvm);
 		return true;
 	}
 
 	return false;
 }
 
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+				       struct kvm_exit_mmio *mmio,
+				       phys_addr_t offset)
+{
+	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
 static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
 					 struct kvm_exit_mmio *mmio,
 					 phys_addr_t offset)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
-				       vcpu->vcpu_id, offset);
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
+}
+
+static bool vgic_handle_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+				    phys_addr_t offset, int vcpu_id, int access)
+{
+	u32 *reg;
+	int mode = ACCESS_READ_VALUE | access;
+
+	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_state, vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset, mode);
 	if (mmio->is_write) {
-		if (offset < 4) /* Force SGI enabled */
-			*reg |= 0xffff;
-		vgic_retire_disabled_irqs(vcpu);
-		vgic_update_state(vcpu->kvm);
+		vgic_update_state(kvm);
 		return true;
 	}
 
@@ -437,31 +456,16 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
 					struct kvm_exit_mmio *mmio,
 					phys_addr_t offset)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
-				       vcpu->vcpu_id, offset);
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
-	if (mmio->is_write) {
-		vgic_update_state(vcpu->kvm);
-		return true;
-	}
-
-	return false;
+	return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+				       vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
 }
 
 static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
 					  struct kvm_exit_mmio *mmio,
 					  phys_addr_t offset)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
-				       vcpu->vcpu_id, offset);
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
-	if (mmio->is_write) {
-		vgic_update_state(vcpu->kvm);
-		return true;
-	}
-
+	return vgic_handle_pending_reg(vcpu->kvm, mmio, offset,
+				       vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
 	return false;
 }
 
@@ -587,14 +591,10 @@ static u16 vgic_cfg_compress(u32 val)
  * LSB is always 0. As such, we only keep the upper bit, and use the
  * two above functions to compress/expand the bits
  */
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
-				struct kvm_exit_mmio *mmio, phys_addr_t offset)
+static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+				phys_addr_t offset)
 {
 	u32 val;
-	u32 *reg;
-
-	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-				  vcpu->vcpu_id, offset >> 1);
 
 	if (offset & 4)
 		val = *reg >> 16;
@@ -623,6 +623,17 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
 	return false;
 }
 
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+				struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	u32 *reg;
+
+	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+				  vcpu->vcpu_id, offset >> 1);
+
+	return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
 static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
 				struct kvm_exit_mmio *mmio,
 				phys_addr_t offset)
 {
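The compress/expand pair referenced in the cfg hunks above exists because
GICD_ICFGR allots two configuration bits per interrupt, but the LSB of each
pair is always 0, so only the upper bit of each pair carries information and
16 two-bit fields fit into 16 bits. Below is a rough, self-contained
re-derivation of that packing under that assumption; cfg_compress/cfg_expand
here are illustrative and are not the kernel's vgic_cfg_compress and
vgic_cfg_expand themselves.

#include <stdio.h>

/*
 * Keep only the MSB of each two-bit GICD_ICFGR field, packing 16 pairs
 * into the low 16 bits of one word.
 */
static unsigned short cfg_compress(unsigned int val)
{
	unsigned short res = 0;
	int i;

	for (i = 0; i < 16; i++)
		res |= ((val >> (2 * i + 1)) & 1) << i;	/* keep MSB of pair */
	return res;
}

/* Inverse operation: spread 16 bits back out, leaving each LSB at 0. */
static unsigned int cfg_expand(unsigned short val)
{
	unsigned int res = 0;
	int i;

	for (i = 0; i < 16; i++)
		res |= (unsigned int)((val >> i) & 1) << (2 * i + 1);
	return res;
}

int main(void)
{
	unsigned int cfg = 0x8a000000;	/* arbitrary test pattern */

	printf("compressed: %#x\n", cfg_compress(cfg));		/* 0xb000 */
	printf("round trip: %#x\n", cfg_expand(cfg_compress(cfg)));	/* 0x8a000000 */
	return 0;
}

Since every discarded bit is architecturally zero, compress followed by
expand is lossless, which is why the patch can store the packed form and
reconstruct the register value on demand.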