From patchwork Fri Oct 2 14:44:29 2015
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Pavel Fedin
X-Patchwork-Id: 7316801
Return-Path:
X-Original-To: patchwork-kvm@patchwork.kernel.org
Delivered-To: patchwork-parsemail@patchwork1.web.kernel.org
Received: from mail.kernel.org (mail.kernel.org [198.145.29.136])
by patchwork1.web.kernel.org (Postfix) with ESMTP id 281069F1D5
for ;
Fri, 2 Oct 2015 14:45:12 +0000 (UTC)
Received: from mail.kernel.org (localhost [127.0.0.1])
by mail.kernel.org (Postfix) with ESMTP id 1A19C208F5
for ;
Fri, 2 Oct 2015 14:45:11 +0000 (UTC)
Received: from vger.kernel.org (vger.kernel.org [209.132.180.67])
by mail.kernel.org (Postfix) with ESMTP id E2764208F4
for ;
Fri, 2 Oct 2015 14:45:09 +0000 (UTC)
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
id S1752313AbbJBOpD (ORCPT
);
Fri, 2 Oct 2015 10:45:03 -0400
Received: from mailout4.w1.samsung.com ([210.118.77.14]:18350 "EHLO
mailout4.w1.samsung.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
with ESMTP id S1751841AbbJBOoi (ORCPT );
Fri, 2 Oct 2015 10:44:38 -0400
Received: from eucpsbgm1.samsung.com (unknown [203.254.199.244])
by mailout4.w1.samsung.com
(Oracle Communications Messaging Server 7.0.5.31.0 64bit (built May 5
2014)) with ESMTP id <0NVL00JIXKYBRI50@mailout4.w1.samsung.com> for
kvm@vger.kernel.org; Fri, 02 Oct 2015 15:44:35 +0100 (BST)
X-AuditID: cbfec7f4-f79c56d0000012ee-3d-560e9853b433
Received: from eusync3.samsung.com ( [203.254.199.213])
by eucpsbgm1.samsung.com (EUCPMTA) with SMTP id 1D.5F.04846.3589E065;
Fri, 2 Oct 2015 15:44:35 +0100 (BST)
Received: from fedinw7x64.rnd.samsung.ru ([106.109.131.169])
by eusync3.samsung.com
(Oracle Communications Messaging Server 7.0.5.31.0 64bit (built May 5
2014)) with ESMTPA id <0NVL00CW9KY5LL80@eusync3.samsung.com>; Fri,
02 Oct 2015 15:44:35 +0100 (BST)
From: Pavel Fedin
To: kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Marc Zyngier ,
Andre Przywara
Subject: [PATCH 2/2] KVM: arm/arm64: Merge vgic_set_lr() and
vgic_sync_lr_elrsr()
Date: Fri, 02 Oct 2015 17:44:29 +0300
Message-id:
<4d9fb027da0d71c9b62bc3ffef1dcd037882ed02.1443796321.git.p.fedin@samsung.com>
X-Mailer: git-send-email 2.4.4
In-reply-to:
References:
In-reply-to:
References:
X-Brightmail-Tracker:
H4sIAAAAAAAAA+NgFprELMWRmVeSWpSXmKPExsVy+t/xq7rBM/jCDC6dELZYMe8no8WcqYUW
H08dZ7f4e+cfmwOLx5p5axg9zm9aw+zxeZNcAHMUl01Kak5mWWqRvl0CV8bftg9MBRd1K9Zc
6mNuYPys0sXIySEhYCKx/PwCZghbTOLCvfVsXYxcHEICSxkl5n/ZxwThtDFJTL67kBWkik1A
XeL01w8sILaIgKnE839vweLMAgESa/81gNnCAsESL7afYAexWQRUJbZ+PsYIYvMKREs8uw0R
lxCQk7hyfTobiM0pYC7xZfJ6sF4hATOJo81vWHGJT2DkX8DIsIpRNLU0uaA4KT3XUK84Mbe4
NC9dLzk/dxMjJKS+7GBcfMzqEKMAB6MSD2/Ead4wIdbEsuLK3EOMEhzMSiK8vql8YUK8KYmV
ValF+fFFpTmpxYcYpTlYlMR55+56HyIkkJ5YkpqdmlqQWgSTZeLglGpg7GFT3eHQVnbhbF0y
h9W/QIGV0q9PJ9SrRYRaW/01nff754qjGqKLe2//LZ+wftuLsi6v65b6xR/eHF4y9cxCWWO+
nbLW5x/JC1/ayH9g8ZQ9Xtoqmgb8zFba7F2hoac0QiUTf72ftGbTsQlmSS+vf/F+YOHVL8AR
qLL7cpmYVdbBJVp+3K41SizFGYmGWsxFxYkAWG7OYiUCAAA=
Sender: kvm-owner@vger.kernel.org
Precedence: bulk
List-ID:
X-Mailing-List: kvm@vger.kernel.org
X-Spam-Status: No, score=-6.9 required=5.0 tests=BAYES_00, RCVD_IN_DNSWL_HI,
T_RP_MATCHES_RCVD,
UNPARSEABLE_RELAY autolearn=unavailable version=3.3.1
X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.kernel.org
X-Virus-Scanned: ClamAV using ClamSMTP
Now we see that vgic_set_lr() and vgic_sync_lr_elrsr() are always used
together. Merge them into one function, avoiding a second vgic_ops
dereference every time.
Additionally, remove the now-unnecessary vgic_set_lr() call in
vgic_unqueue_irqs(), because the subsequent vgic_retire_lr() will reset
lr.state to zero anyway.
Signed-off-by: Pavel Fedin
---
include/kvm/arm_vgic.h | 1 -
virt/kvm/arm/vgic-v2.c | 5 -----
virt/kvm/arm/vgic-v3.c | 5 -----
virt/kvm/arm/vgic.c | 30 ++++--------------------------
4 files changed, 4 insertions(+), 37 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index d908028..ab5d242 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -112,7 +112,6 @@ struct vgic_vmcr {
struct vgic_ops {
struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
- void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
void (*clear_eisr)(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 8d7b04d..f9d8da5 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -79,11 +79,7 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
-}
-static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
else
@@ -166,7 +162,6 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v2_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
- .sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
.get_elrsr = vgic_v2_get_elrsr,
.get_eisr = vgic_v2_get_eisr,
.clear_eisr = vgic_v2_clear_eisr,
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 7dd5d62..75f6d91 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -112,11 +112,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
}
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
-}
-static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
else
@@ -211,7 +207,6 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v3_ops = {
.get_lr = vgic_v3_get_lr,
.set_lr = vgic_v3_set_lr,
- .sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
.get_elrsr = vgic_v3_get_elrsr,
.get_eisr = vgic_v3_get_eisr,
.clear_eisr = vgic_v3_clear_eisr,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 2f4d25a..7e164eb 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -709,10 +709,8 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* interrupt then move the active state to the
* distributor tracking bit.
*/
- if (lr.state & LR_STATE_ACTIVE) {
+ if (lr.state & LR_STATE_ACTIVE)
vgic_irq_set_active(vcpu, lr.irq);
- lr.state &= ~LR_STATE_ACTIVE;
- }
/*
* Reestablish the pending state on the distributor and the
@@ -720,17 +718,12 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* is fine, then we are only setting a few bits that were
* already set.
*/
- if (lr.state & LR_STATE_PENDING) {
+ if (lr.state & LR_STATE_PENDING)
vgic_dist_irq_set_pending(vcpu, lr.irq);
- lr.state &= ~LR_STATE_PENDING;
- }
-
- vgic_set_lr(vcpu, i, lr);
/*
* Mark the LR as free for other use.
*/
- BUG_ON(lr.state & LR_STATE_MASK);
vgic_retire_lr(i, vcpu);
vgic_irq_clear_queued(vcpu, lr.irq);
@@ -1039,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
vgic_ops->set_lr(vcpu, lr, vlr);
}
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr vlr)
-{
- vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
return vgic_ops->get_elrsr(vcpu);
@@ -1096,7 +1083,6 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
vlr.state = 0;
vgic_set_lr(vcpu, lr_nr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
@@ -1160,7 +1146,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
}
vgic_set_lr(vcpu, lr_nr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
/*
@@ -1380,12 +1365,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
}
spin_unlock(&dist->lock);
-
- /*
- * Despite being EOIed, the LR may not have
- * been marked as empty.
- */
- vgic_sync_lr_elrsr(vcpu, lr, vlr);
}
}
@@ -1446,8 +1425,6 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
bool level_pending;
level_pending = vgic_process_maintenance(vcpu);
- elrsr = vgic_get_elrsr(vcpu);
- elrsr_ptr = u64_to_bitmask(&elrsr);
/* Deal with HW interrupts, and clear mappings for empty LRs */
for (lr = 0; lr < vgic->nr_lr; lr++) {
@@ -1463,11 +1440,12 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
vlr.hwirq = 0;
vgic_set_lr(vcpu, lr, vlr);
vgic_irq_clear_queued(vcpu, vlr.irq);
- set_bit(lr, elrsr_ptr);
}
}
/* Check if we still have something up our sleeve... */
+ elrsr = vgic_get_elrsr(vcpu);
+ elrsr_ptr = u64_to_bitmask(&elrsr);
pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
if (level_pending || pending < vgic->nr_lr)
set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);