From patchwork Fri Aug 28 12:56:10 2015
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Pavel Fedin
X-Patchwork-Id: 7091791
Return-Path:
X-Original-To: patchwork-kvm@patchwork.kernel.org
Delivered-To: patchwork-parsemail@patchwork1.web.kernel.org
Received: from mail.kernel.org (mail.kernel.org [198.145.29.136])
by patchwork1.web.kernel.org (Postfix) with ESMTP id 177BD9F46B
for ;
Fri, 28 Aug 2015 12:56:29 +0000 (UTC)
Received: from mail.kernel.org (localhost [127.0.0.1])
by mail.kernel.org (Postfix) with ESMTP id 0181D20846
for ;
Fri, 28 Aug 2015 12:56:28 +0000 (UTC)
Received: from vger.kernel.org (vger.kernel.org [209.132.180.67])
by mail.kernel.org (Postfix) with ESMTP id A91C82084A
for ;
Fri, 28 Aug 2015 12:56:26 +0000 (UTC)
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
id S1752378AbbH1M4Y (ORCPT
);
Fri, 28 Aug 2015 08:56:24 -0400
Received: from mailout4.w1.samsung.com ([210.118.77.14]:16051 "EHLO
mailout4.w1.samsung.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
with ESMTP id S1751844AbbH1M4U (ORCPT );
Fri, 28 Aug 2015 08:56:20 -0400
Received: from eucpsbgm1.samsung.com (unknown [203.254.199.244])
by mailout4.w1.samsung.com
(Oracle Communications Messaging Server 7.0.5.31.0 64bit (built May 5
2014)) with ESMTP id <0NTS00MXLMLTD940@mailout4.w1.samsung.com> for
kvm@vger.kernel.org; Fri, 28 Aug 2015 13:56:18 +0100 (BST)
X-AuditID: cbfec7f4-f79c56d0000012ee-17-55e05a71c2ce
Received: from eusync2.samsung.com ( [203.254.199.212])
by eucpsbgm1.samsung.com (EUCPMTA) with SMTP id 59.AD.04846.17A50E55;
Fri, 28 Aug 2015 13:56:17 +0100 (BST)
Received: from fedinw7x64.rnd.samsung.ru ([106.109.131.169])
by eusync2.samsung.com
(Oracle Communications Messaging Server 7.0.5.31.0 64bit (built May 5
2014)) with ESMTPA id <0NTS00AZHMLPHM90@eusync2.samsung.com>; Fri,
28 Aug 2015 13:56:17 +0100 (BST)
From: Pavel Fedin
To: kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Marc Zyngier ,
Christoffer Dall
Subject: [PATCH 1/3] KVM: arm64: Implement vGICv3 distributor and
redistributor access from userspace
Date: Fri, 28 Aug 2015 15:56:10 +0300
Message-id:
X-Mailer: git-send-email 2.4.4
In-reply-to:
References:
In-reply-to:
References:
X-Brightmail-Tracker:
H4sIAAAAAAAAA+NgFprGLMWRmVeSWpSXmKPExsVy+t/xK7qFUQ9CDe5utrR48fofo8WcqYUW
H08dZ7f4e+cfmwOLx5p5axg97lzbw+ZxftMaZo/Pm+QCWKK4bFJSczLLUov07RK4MuZvbWEt
eGBaceD+VrYGxk9aXYycHBICJhKzFr9ghbDFJC7cW8/WxcjFISSwlFHi0rwL7BBOG5PE76mX
2ECq2ATUJU5//cACYosImEo8//cWqJuDg1kgUuLtdnWQsLBAusTtFafZQWwWAVWJKZ8/g9m8
AtES37dMh1omJ3Hl+nSwkZwC5hIr1+9gBLGFBMwkju05wI5LfAIj/wJGhlWMoqmlyQXFSem5
hnrFibnFpXnpesn5uZsYIaH1ZQfj4mNWhxgFOBiVeHgtNtwPFWJNLCuuzD3EKMHBrCTCGyL0
IFSINyWxsiq1KD++qDQntfgQozQHi5I479xd70OEBNITS1KzU1MLUotgskwcnFINjKsdd9Rz
GAjstNhw8Of7oLlW/yfM/SYYciTKmX+JBPcbvSjWxw+veW95dXaKR/Cyn4181RUcxmlP1oQt
zlO6/GrHvdy3YS/ni7wO8Xt7YJH31yfWopzz3l6N/54mZHqBL4P1RPiaH2xHvzMHV/sprTuS
laDi/POZU4Nzwv4zd7foL27T/73pc7kSS3FGoqEWc1FxIgBuXBoIKQIAAA==
Sender: kvm-owner@vger.kernel.org
Precedence: bulk
List-ID:
X-Mailing-List: kvm@vger.kernel.org
X-Spam-Status: No, score=-8.3 required=5.0 tests=BAYES_00, RCVD_IN_DNSWL_HI,
RP_MATCHES_RCVD,
UNPARSEABLE_RELAY autolearn=unavailable version=3.3.1
X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.kernel.org
X-Virus-Scanned: ClamAV using ClamSMTP
The access is done similar to GICv2, using KVM_DEV_ARM_VGIC_GRP_DIST_REGS
and KVM_DEV_ARM_VGIC_GRP_REDIST_REGS with KVM_SET_DEVICE_ATTR and
KVM_GET_DEVICE_ATTR ioctls.
Registers are always assumed to be of their native size, 4 or 8 bytes.
Signed-off-by: Pavel Fedin
---
arch/arm64/include/uapi/asm/kvm.h | 1 +
virt/kvm/arm/vgic-v3-emul.c | 186 +++++++++++++++++++++++++++++++++++---
2 files changed, 172 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 0cd7b59..2936651 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -203,6 +203,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index e661e7f..b3847e1 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -39,6 +39,7 @@
#include
#include
#include
+#include <linux/uaccess.h>
#include
#include
@@ -990,6 +991,107 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
vgic_kick_vcpus(vcpu->kvm);
}
+/*
+ * vgic_v3_attr_regs_access - userspace access to one distributor or
+ * redistributor register via KVM_GET/SET_DEVICE_ATTR.
+ * @dev: the VGIC kvm device
+ * @attr: encodes the register offset (KVM_DEV_ARM_VGIC_OFFSET_MASK) and
+ * the target VCPU id (KVM_DEV_ARM_VGIC_CPUID_MASK)
+ * @reg: caller buffer holding the value to write, or receiving the value read
+ * @len: native register width in bytes, 4 or 8
+ * @is_write: true to write the register, false to read it
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range cpuid, -EBUSY if any
+ * VCPU is currently running, -ENXIO if no handler covers the offset, or
+ * the error from vgic_init(). Must not be called with any VCPU running.
+ */
+static int vgic_v3_attr_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ void *reg, u32 len, bool is_write)
+{
+ const struct vgic_io_range *r = NULL, *ranges;
+ phys_addr_t offset;
+ int ret, cpuid, c;
+ struct kvm_vcpu *vcpu, *tmp_vcpu;
+ struct vgic_dist *vgic;
+ struct kvm_exit_mmio mmio;
+ u64 data;
+
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+ KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+ mutex_lock(&dev->kvm->lock);
+
+ /* Late-init the VGIC if userspace pokes it before the first run */
+ ret = vgic_init(dev->kvm);
+ if (ret)
+ goto out;
+
+ if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ vgic = &dev->kvm->arch.vgic;
+
+ /* Fake an MMIO access so the regular emulation handlers can be reused */
+ mmio.len = len;
+ mmio.is_write = is_write;
+ mmio.data = &data;
+ if (is_write) {
+ if (len == 8)
+ data = cpu_to_le64(*((u64 *)reg));
+ else
+ mmio_data_write(&mmio, ~0, *((u32 *)reg));
+ }
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ mmio.phys_addr = vgic->vgic_dist_base + offset;
+ ranges = vgic_v3_dist_ranges;
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
+ mmio.phys_addr = vgic->vgic_redist_base + offset;
+ ranges = vgic_redist_ranges;
+ break;
+ default:
+ /* Callers only pass the two groups above; anything else is a bug */
+ BUG();
+ }
+ /*
+ * NOTE(review): the access length is hard-coded to 4 here even when
+ * len == 8 — presumably the range table is keyed at 4-byte
+ * granularity; confirm against vgic_find_range() semantics.
+ */
+ r = vgic_find_range(ranges, 4, offset);
+
+ if (unlikely(!r || !r->handle_mmio)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+
+ spin_lock(&vgic->lock);
+
+ /*
+ * Ensure that no other VCPU is running by checking the vcpu->cpu
+ * field. If no other VCPUs are running we can safely access the VGIC
+ * state, because even if another VCPU is run after this point, that
+ * VCPU will not touch the vgic state, because it will block on
+ * getting the vgic->lock in kvm_vgic_sync_hwstate().
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+ if (unlikely(tmp_vcpu->cpu != -1)) {
+ ret = -EBUSY;
+ goto out_vgic_unlock;
+ }
+ }
+
+ /*
+ * Move all pending IRQs from the LRs on all VCPUs so the pending
+ * state can be properly represented in the register state accessible
+ * through this API.
+ */
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
+ vgic_unqueue_irqs(tmp_vcpu);
+
+ /* Handlers take the offset relative to the start of their range */
+ offset -= r->base;
+ r->handle_mmio(vcpu, &mmio, offset);
+
+ if (!is_write) {
+ if (len == 8)
+ *(u64 *)reg = le64_to_cpu(data);
+ else
+ *(u32 *)reg = mmio_data_read(&mmio, ~0);
+ }
+
+ ret = 0;
+out_vgic_unlock:
+ spin_unlock(&vgic->lock);
+out:
+ mutex_unlock(&dev->kvm->lock);
+ return ret;
+}
+
static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
return kvm_vgic_create(dev->kvm, type);
@@ -1000,40 +1102,95 @@ static void vgic_v3_destroy(struct kvm_device *dev)
kfree(dev);
}
+/*
+ * vgic_v3_get_reg_size - native access size, in bytes, of the register
+ * addressed by @attr.
+ *
+ * Returns 8 for the 64-bit registers (GICD_IROUTERn, GICR_TYPER and the
+ * GICR_SETLPIR..GICR_INVALLR block), 4 for every other register, or
+ * -ENXIO for an unknown attribute group.
+ *
+ * The return type is int (not u32): a negative errno must not be
+ * truncated into an unsigned value, and callers test the result with
+ * "len < 0".
+ */
+static int vgic_v3_get_reg_size(struct kvm_device_attr *attr)
+{
+ u32 offset;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ /* 0x7FD8 is GICD_IROUTER(1019), the last router register */
+ if (offset >= GICD_IROUTER && offset <= 0x7FD8)
+ return 8;
+ return 4;
+
+ case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
+ offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ if ((offset == GICR_TYPER) ||
+ (offset >= GICR_SETLPIR && offset <= GICR_INVALLR))
+ return 8;
+ return 4;
+
+ default:
+ return -ENXIO;
+ }
+}
+
static int vgic_v3_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
- int ret;
+ int ret, len;
+ u64 reg64;
+ u32 reg;
+ void *data;
ret = vgic_set_common_attr(dev, attr);
if (ret != -ENXIO)
return ret;
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- return -ENXIO;
+ len = vgic_v3_get_reg_size(attr);
+ if (len < 0)
+ return len;
+
+ if (len == 8) {
+ u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+
+ ret = get_user(reg64, uaddr);
+ data = &reg64;
+ } else {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
+ ret = get_user(reg, uaddr);
+ data = &reg;
}
+ if (ret)
+ return -EFAULT;
- return -ENXIO;
+ return vgic_v3_attr_regs_access(dev, attr, data, len, true);
}
static int vgic_v3_get_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
- int ret;
+ int ret, len;
+ u64 reg64;
+ u32 reg;
ret = vgic_get_common_attr(dev, attr);
if (ret != -ENXIO)
return ret;
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- return -ENXIO;
- }
+ len = vgic_v3_get_reg_size(attr);
+ if (len < 0)
+ return len;
- return -ENXIO;
+ ret = vgic_v3_attr_regs_access(dev, attr, (len == 8) ? (void *)&reg64 :
+ (void *)&reg, len, false);
+ if (ret)
+ return ret;
+
+ if (len == 8) {
+ u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+
+ return put_user(reg64, uaddr);
+ } else {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
+ return put_user(reg, uaddr);
+ }
}
static int vgic_v3_has_attr(struct kvm_device *dev,
@@ -1051,8 +1208,7 @@ static int vgic_v3_has_attr(struct kvm_device *dev,
}
break;
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- return -ENXIO;
+ case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
return 0;
case KVM_DEV_ARM_VGIC_GRP_CTRL: