[v3,5/5] KVM: arm64: Implement vGICv3 CPU interface access

Message ID 20be17e4e8aebcd7f1a52a634f449955bef99996.1441370053.git.p.fedin@samsung.com (mailing list archive)
State New, archived

Commit Message

Pavel Fedin Sept. 4, 2015, 12:40 p.m. UTC
The access is done similarly to GICv2, using the KVM_DEV_ARM_VGIC_GRP_CPU_REGS
group; however, the attribute ID encodes the corresponding system register.
The access size is always 64 bits.

Since the CPU interface state affects only a single vCPU, no vGIC locking is
done; we just make sure that the vCPU is not running.

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
---
 Documentation/virtual/kvm/devices/arm-vgic.txt |  38 +++-
 arch/arm64/include/uapi/asm/kvm.h              |   7 +
 include/linux/irqchip/arm-gic-v3.h             |  18 +-
 virt/kvm/arm/vgic-v3-emul.c                    | 244 +++++++++++++++++++++++++
 4 files changed, 303 insertions(+), 4 deletions(-)
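
For illustration, a minimal userspace sketch of how this group could be exercised when
building natively on arm64 (error handling is elided; the helper name and the device-fd
plumbing are assumptions, only the attribute encoding follows this patch and the existing
KVM device API):

/*
 * Illustrative sketch only: read ICC_CTLR_EL1 of vCPU 'cpu' through the
 * KVM_DEV_ARM_VGIC_GRP_CPU_REGS group proposed here.  'vgic_fd' is assumed
 * to be the fd returned by KVM_CREATE_DEVICE for a KVM_DEV_TYPE_ARM_VGIC_V3
 * device.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_icc_ctlr(int vgic_fd, unsigned int cpu, uint64_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
		/* cpu index in the CPUID field, sysreg id from ARM64_SYS_REG() */
		.attr  = ((uint64_t)cpu << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 ARM64_SYS_REG(3, 0, 12, 12, 4),	/* ICC_CTLR_EL1 */
		.addr  = (uint64_t)(unsigned long)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}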

Comments

Andre Przywara Sept. 4, 2015, 3:13 p.m. UTC | #1
Hi Pavel,

On 04/09/15 13:40, Pavel Fedin wrote:
> The access is done similar to GICv2, using KVM_DEV_ARM_VGIC_GRP_CPU_REGS
> group, however attribute ID encodes corresponding system register. Access
> size is always 64 bits.

Why is this? All registers in the CPU interface (except the write-only
SGI registers) are 32 bits wide, and the pending 32-bit GICv3 support
series[1] exploits this by using MRC/MCR accesses.
The only thing that is purely 64-bit is the MRS/MSR _instruction_ in
AArch64, which always takes an x<nn> register.
So can you model the register size according to the spec and allow
32-bit accesses from userland?

> Since CPU interface state actually affects only a single vCPU, no vGIC
> locking is done. Just made sure that the vCPU is not running.
> 
> Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
> ---
>  Documentation/virtual/kvm/devices/arm-vgic.txt |  38 +++-
>  arch/arm64/include/uapi/asm/kvm.h              |   7 +
>  include/linux/irqchip/arm-gic-v3.h             |  18 +-
>  virt/kvm/arm/vgic-v3-emul.c                    | 244 +++++++++++++++++++++++++
>  4 files changed, 303 insertions(+), 4 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
> index 03f640f..518b634 100644
> --- a/Documentation/virtual/kvm/devices/arm-vgic.txt
> +++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
> @@ -88,7 +88,7 @@ Groups:
>      -ENODEV: Getting or setting this register is not yet supported
>      -EBUSY: One or more VCPUs are running
> 
> -  KVM_DEV_ARM_VGIC_GRP_CPU_REGS
> +  KVM_DEV_ARM_VGIC_GRP_CPU_REGS (vGICv2)
>    Attributes:
>      The attr field of kvm_device_attr encodes two values:
>      bits:     | 63   ....  52 | 51 ..  32  |  31   ....    0 |
> @@ -116,11 +116,45 @@ Groups:
> 
>    Limitations:
>      - Priorities are not implemented, and registers are RAZ/WI
> -    - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
>    Errors:
>      -ENODEV: Getting or setting this register is not yet supported
>      -EBUSY: One or more VCPUs are running
> 
> +  KVM_DEV_ARM_VGIC_GRP_CPU_REGS (vGICv3)
> +  Attributes:
> +    The attr field of kvm_device_attr encodes the following values:
> +    bits:   | 63 .. 56 | 55 .. 48 | 47 ... 40 | 39 .. 32 | 31 .. 0 |
> +    values: |   arch   |   size   | reserved  |  cpu id  |  reg id |
> +
> +    All CPU interface regs are (rw, 64-bit). The only supported size value is
> +    KVM_REG_SIZE_U64.
> +
> +    The arch, size and reg id fields together encode the system register to be
> +    accessed. Normally these values are obtained using the ARM64_SYS_REG() macro.
> +    Getting or setting such a register has the same effect as reading or
> +    writing the register on the actual hardware.
> +
> +    The Active Priorities Registers AP0Rn and AP1Rn are implementation defined,
> +    so we set a fixed format for our implementation that fits with the model of
> +    a "GICv3 implementation without the security extensions" which we present
> +    to the guest. This interface always exposes four register APR[0-3]
> +    describing the maximum possible 128 preemption levels. The semantics of the
> +    register indicates if any interrupts in a given preemption level are in the
> +    active state by setting the corresponding bit.
> +
> +    Thus, preemption level X has one or more active interrupts if and only if:
> +
> +      APRn[X mod 32] == 0b1,  where n = X / 32
> +
> +    Bits for undefined preemption levels are RAZ/WI.
> +
> +  Limitations:
> +    - Priorities are not implemented, and registers are RAZ/WI
> +  Errors:
> +    -ENODEV: Getting or setting this register is not yet supported

The code uses -ENXIO.

> +    -EBUSY: One or more VCPUs are running
> +
> +
>    KVM_DEV_ARM_VGIC_GRP_NR_IRQS
>    Attributes:
>      A value describing the number of interrupts (SGI, PPI and SPI) for
> diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
> index 249954f..7d37ccd 100644
> --- a/arch/arm64/include/uapi/asm/kvm.h
> +++ b/arch/arm64/include/uapi/asm/kvm.h
> @@ -201,6 +201,13 @@ struct kvm_arch_memory_slot {
>  #define   KVM_DEV_ARM_VGIC_CPUID_MASK  (0xfffffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
>  #define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT        0
>  #define   KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
> +#define   KVM_DEV_ARM_VGIC_REG_MASK    (KVM_REG_SIZE_MASK | \
> +                                        KVM_REG_ARM64_SYSREG_OP0_MASK | \
> +                                        KVM_REG_ARM64_SYSREG_OP1_MASK | \
> +                                        KVM_REG_ARM64_SYSREG_CRN_MASK | \
> +                                        KVM_REG_ARM64_SYSREG_CRM_MASK | \
> +                                        KVM_REG_ARM64_SYSREG_OP2_MASK)
> +
>  #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS   3
>  #define KVM_DEV_ARM_VGIC_GRP_CTRL      4
>  #define   KVM_DEV_ARM_VGIC_CTRL_INIT   0
> diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
> index 9eeeb95..dbc5c49 100644
> --- a/include/linux/irqchip/arm-gic-v3.h
> +++ b/include/linux/irqchip/arm-gic-v3.h
> @@ -259,8 +259,14 @@
>  /*
>   * CPU interface registers
>   */
> -#define ICC_CTLR_EL1_EOImode_drop_dir  (0U << 1)
> -#define ICC_CTLR_EL1_EOImode_drop      (1U << 1)
> +#define ICC_CTLR_EL1_CBPR_SHIFT                0
> +#define ICC_CTLR_EL1_EOImode_SHIFT     1
> +#define ICC_CTLR_EL1_EOImode_drop_dir  (0U << ICC_CTLR_EL1_EOImode_SHIFT)
> +#define ICC_CTLR_EL1_EOImode_drop      (1U << ICC_CTLR_EL1_EOImode_SHIFT)
> +#define ICC_CTLR_EL1_PRIbits_MASK      (7U << 8)
> +#define ICC_CTLR_EL1_IDbits_MASK       (7U << 11)
> +#define ICC_CTLR_EL1_SEIS              (1U << 14)
> +#define ICC_CTLR_EL1_A3V               (1U << 15)
>  #define ICC_SRE_EL1_SRE                        (1U << 0)
> 
>  /*
> @@ -285,6 +291,14 @@
> 
>  #define ICH_VMCR_CTLR_SHIFT            0
>  #define ICH_VMCR_CTLR_MASK             (0x21f << ICH_VMCR_CTLR_SHIFT)
> +#define ICH_VMCR_ENG0_SHIFT            0
> +#define ICH_VMCR_ENG0                  (1 << ICH_VMCR_ENG0_SHIFT)
> +#define ICH_VMCR_ENG1_SHIFT            1
> +#define ICH_VMCR_ENG1                  (1 << ICH_VMCR_ENG1_SHIFT)
> +#define ICH_VMCR_CBPR_SHIFT            4
> +#define ICH_VMCR_CBPR                  (1 << ICH_VMCR_CBPR_SHIFT)
> +#define ICH_VMCR_EOIM_SHIFT            9
> +#define ICH_VMCR_EOIM                  (1 << ICH_VMCR_EOIM_SHIFT)
>  #define ICH_VMCR_BPR1_SHIFT            18
>  #define ICH_VMCR_BPR1_MASK             (7 << ICH_VMCR_BPR1_SHIFT)
>  #define ICH_VMCR_BPR0_SHIFT            21
> diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
> index 8bda714..2f1c27b 100644
> --- a/virt/kvm/arm/vgic-v3-emul.c
> +++ b/virt/kvm/arm/vgic-v3-emul.c
> @@ -48,6 +48,7 @@
>  #include <asm/kvm_arm.h>
>  #include <asm/kvm_mmu.h>
> 
> +#include "sys_regs.h"
>  #include "vgic.h"
> 
>  static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
> @@ -991,6 +992,247 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
>                 vgic_kick_vcpus(vcpu->kvm);
>  }
> 
> +static bool access_gic_ctlr(struct kvm_vcpu *vcpu,
> +                           const struct sys_reg_params *p,
> +                           const struct sys_reg_desc *r)
> +{
> +       u64 val;
> +       struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +       if (p->is_write) {
> +               val = *p->val;
> +
> +               vgicv3->vgic_vmcr &= ~(ICH_VMCR_CBPR|ICH_VMCR_EOIM);
> +               vgicv3->vgic_vmcr |= (val << (ICH_VMCR_CBPR_SHIFT -
> +                                               ICC_CTLR_EL1_CBPR_SHIFT)) &
> +                                       ICH_VMCR_CBPR;
> +               vgicv3->vgic_vmcr |= (val << (ICH_VMCR_EOIM_SHIFT -
> +                                               ICC_CTLR_EL1_EOImode_SHIFT)) &
> +                                       ICH_VMCR_EOIM;
> +       } else {
> +               asm volatile("mrs_s %0, " __stringify(ICC_CTLR_EL1)
> +                            : "=r" (val));
> +               val &= (ICC_CTLR_EL1_A3V | ICC_CTLR_EL1_SEIS |
> +                       ICC_CTLR_EL1_IDbits_MASK | ICC_CTLR_EL1_PRIbits_MASK);
> +               val |= (vgicv3->vgic_vmcr & ICH_VMCR_CBPR) >>
> +                       (ICH_VMCR_CBPR_SHIFT - ICC_CTLR_EL1_CBPR_SHIFT);
> +               val |= (vgicv3->vgic_vmcr & ICH_VMCR_EOIM) >>
> +                       (ICH_VMCR_EOIM_SHIFT - ICC_CTLR_EL1_EOImode_SHIFT);
> +
> +               *p->val = val;
> +       }
> +
> +       return true;
> +}
> +
> +static bool access_gic_pmr(struct kvm_vcpu *vcpu,
> +                          const struct sys_reg_params *p,
> +                          const struct sys_reg_desc *r)
> +{
> +       struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +       if (p->is_write) {
> +               vgicv3->vgic_vmcr &= ~ICH_VMCR_PMR_MASK;
> +               vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_PMR_SHIFT) &
> +                                       ICH_VMCR_PMR_MASK;
> +       } else {
> +               *p->val = (vgicv3->vgic_vmcr & ICH_VMCR_PMR_MASK) >>
> +                         ICH_VMCR_PMR_SHIFT;
> +       }
> +
> +       return true;
> +}
> +
> +static bool access_gic_bpr0(struct kvm_vcpu *vcpu,
> +                           const struct sys_reg_params *p,
> +                           const struct sys_reg_desc *r)
> +{
> +       struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +       if (p->is_write) {
> +               vgicv3->vgic_vmcr &= ~ICH_VMCR_BPR0_MASK;
> +               vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_BPR0_SHIFT) &
> +                                    ICH_VMCR_BPR0_MASK;
> +       } else {
> +               *p->val = (vgicv3->vgic_vmcr & ICH_VMCR_BPR0_MASK) >>
> +                         ICH_VMCR_BPR0_SHIFT;
> +       }
> +
> +       return true;
> +}
> +
> +static bool access_gic_bpr1(struct kvm_vcpu *vcpu,
> +                           const struct sys_reg_params *p,
> +                           const struct sys_reg_desc *r)
> +{
> +       struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +       if (p->is_write) {
> +               vgicv3->vgic_vmcr &= ~ICH_VMCR_BPR1_MASK;
> +               vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_BPR1_SHIFT) &
> +                                    ICH_VMCR_BPR1_MASK;
> +       } else {
> +               *p->val = (vgicv3->vgic_vmcr & ICH_VMCR_BPR1_MASK) >>
> +                         ICH_VMCR_BPR1_SHIFT;
> +       }
> +
> +       return true;
> +}
> +
> +static bool access_gic_grpen0(struct kvm_vcpu *vcpu,
> +                             const struct sys_reg_params *p,
> +                             const struct sys_reg_desc *r)
> +{
> +       struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +       if (p->is_write) {
> +               vgicv3->vgic_vmcr &= ~ICH_VMCR_ENG0;
> +               vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_ENG0_SHIFT) &
> +                                    ICH_VMCR_ENG0;
> +       } else {
> +               *p->val = (vgicv3->vgic_vmcr & ICH_VMCR_ENG0) >>
> +                         ICH_VMCR_ENG0_SHIFT;
> +       }
> +
> +       return true;
> +}
> +
> +static bool access_gic_grpen1(struct kvm_vcpu *vcpu,
> +                             const struct sys_reg_params *p,
> +                             const struct sys_reg_desc *r)
> +{
> +       struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +       if (p->is_write) {
> +               vgicv3->vgic_vmcr &= ~ICH_VMCR_ENG1;
> +               vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_ENG1_SHIFT) &
> +                                    ICH_VMCR_ENG1;
> +       } else {
> +               *p->val = (vgicv3->vgic_vmcr & ICH_VMCR_ENG1) >>
> +                         ICH_VMCR_ENG1_SHIFT;
> +       }
> +
> +       return true;
> +}

I wonder if the 5 functions above could be merged to have only one
implementation and 5 wrappers, since they are so similar.
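Something along these lines, maybe (completely untested sketch, just
reusing the ICH_VMCR_* masks and shifts from this patch):

static bool access_gic_vmcr_field(struct kvm_vcpu *vcpu,
				  const struct sys_reg_params *p,
				  u32 mask, u32 shift)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (p->is_write) {
		vgicv3->vgic_vmcr &= ~mask;
		vgicv3->vgic_vmcr |= (*p->val << shift) & mask;
	} else {
		*p->val = (vgicv3->vgic_vmcr & mask) >> shift;
	}

	return true;
}

static bool access_gic_pmr(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	return access_gic_vmcr_field(vcpu, p, ICH_VMCR_PMR_MASK,
				     ICH_VMCR_PMR_SHIFT);
}

/* ...and equally trivial wrappers for BPR0, BPR1, EN0 and EN1. */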

Cheers,
Andre.

[1]
http://lists.infradead.org/pipermail/linux-arm-kernel/2015-March/327034.html

> +
> +static bool access_gic_ap0r(struct kvm_vcpu *vcpu,
> +                             const struct sys_reg_params *p,
> +                             const struct sys_reg_desc *r)
> +{
> +       struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +       u8 idx = r->Op2 & 3;
> +
> +       if (p->is_write)
> +               vgicv3->vgic_ap0r[idx] = *p->val;
> +       else
> +               *p->val = vgicv3->vgic_ap0r[idx];
> +
> +       return true;
> +}
> +
> +static bool access_gic_ap1r(struct kvm_vcpu *vcpu,
> +                             const struct sys_reg_params *p,
> +                             const struct sys_reg_desc *r)
> +{
> +       struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +       u8 idx = r->Op2 & 3;
> +
> +       if (p->is_write)
> +               vgicv3->vgic_ap1r[idx] = *p->val;
> +       else
> +               *p->val = vgicv3->vgic_ap1r[idx];
> +
> +       return true;
> +}
> +
> +static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
> +       /* ICC_PMR_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b0100), CRm(0b0110), Op2(0b000),
> +         access_gic_pmr },
> +       /* ICC_BPR0_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b011),
> +         access_gic_bpr0 },
> +       /* ICC_AP0R0_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b100),
> +         access_gic_ap0r },
> +       /* ICC_AP0R1_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b101),
> +         access_gic_ap0r },
> +       /* ICC_AP0R2_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b110),
> +         access_gic_ap0r },
> +       /* ICC_AP0R3_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b111),
> +         access_gic_ap0r },
> +       /* ICC_AP1R0_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1001), Op2(0b000),
> +         access_gic_ap1r },
> +       /* ICC_AP1R1_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1001), Op2(0b001),
> +         access_gic_ap1r },
> +       /* ICC_AP1R2_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1001), Op2(0b010),
> +         access_gic_ap1r },
> +       /* ICC_AP1R3_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1001), Op2(0b011),
> +         access_gic_ap1r },
> +       /* ICC_BPR1_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b011),
> +         access_gic_bpr1 },
> +       /* ICC_CTLR_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b100),
> +         access_gic_ctlr },
> +       /* ICC_IGRPEN0_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b110),
> +         access_gic_grpen0 },
> +       /* ICC_IGRPEN1_EL1 */
> +       { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b111),
> +         access_gic_grpen1 },
> +};
> +
> +static int vgic_v3_cpu_regs_access(struct kvm_device *dev,
> +                                  struct kvm_device_attr *attr,
> +                                  bool is_write, int cpuid)
> +{
> +       u64 __user *uaddr = (u64 __user *)(long)attr->addr;
> +       u64 id = attr->attr & KVM_DEV_ARM_VGIC_REG_MASK;
> +       struct kvm_vcpu *vcpu;
> +       struct sys_reg_params params;
> +       u_long reg;
> +       const struct sys_reg_desc *r;
> +
> +       params.val = &reg;
> +       params.is_write = is_write;
> +       params.is_aarch32 = false;
> +       params.is_32bit = false;
> +
> +       r = find_reg_by_id(id, &params, gic_v3_icc_reg_descs,
> +                          ARRAY_SIZE(gic_v3_icc_reg_descs));
> +       if (!r)
> +               return -ENXIO;
> +
> +       if (is_write) {
> +               if (get_user(reg, uaddr))
> +                       return -EFAULT;
> +       }
> +
> +       if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
> +               return -EINVAL;
> +
> +       vcpu = kvm_get_vcpu(dev->kvm, cpuid);
> +       /* Ensure that VCPU is not running */
> +       if (unlikely(vcpu->cpu != -1))
> +                       return -EBUSY;
> +
> +       if (!r->access(vcpu, &params, r))
> +               return -EINVAL;
> +
> +       if (!is_write)
> +               return put_user(reg, uaddr);
> +
> +       return 0;
> +}
> +
>  static int vgic_v3_attr_regs_access(struct kvm_device *dev,
>                                     struct kvm_device_attr *attr,
>                                     bool is_write)
> @@ -1015,6 +1257,8 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
>                 mmio.phys_addr = vgic->vgic_redist_base;
>                 ranges = vgic_redist_ranges;
>                 break;
> +       case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
> +               return vgic_v3_cpu_regs_access(dev, attr, is_write, cpuid);
>         default:
>                 return -ENXIO;
>         }
> --
> 2.4.4
> 
Pavel Fedin Sept. 4, 2015, 3:40 p.m. UTC | #2
Hello!

> So can you model the register size according to the spec and allow
> 32-bit accesses from userland?

 I can. But I'll have to invent my own macro for encoding register IDs into the attribute, and
drop reusing index_to_params(). Will that be OK?
 Upside: I can further extend the "cpu id" (actually CPU index) field to 31 bits. :)

 It's Friday evening here, the work week is almost over, so I'll read your replies on Monday.

Kind regards,
Pavel Fedin
Expert Engineer
Samsung Electronics Research center Russia


Pavel Fedin Sept. 24, 2015, 12:08 p.m. UTC | #3
Hello!

> The only thing that is pure 64-bit is the MRS/MSR _instruction_ in
> Aarch64, which always takes a x<nn> register.
> So can you model the register size according to the spec and allow
> 32-bit accesses from userland?

 I would like to complete the rework and respin v4, but this is, I guess, the only major issue
left. Additionally, it impacts the API. So...
 In order to allow 32-bit accesses we would have to drop using ARM64_SYS_REG() for building the
attribute ID and introduce something of our own, like KVM_DEV_ARM_VGIC_REG(). It would have a
different bit layout (it would be missing the 'arch' and 'size' fields, and instead I would use a
KVM_DEV_ARM_VGIC_64BIT flag for length specification, the same as for the redistributor).
 Will this be OK?
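 Roughly like this, purely to illustrate the layout I have in mind (the flag bit position is
just a placeholder, nothing here is final):

/* Illustration only, not a final ABI proposal: cpu index, a length flag
 * (placeholder bit position) and a plain register id, with no arch/size
 * fields. */
#define KVM_DEV_ARM_VGIC_64BIT		(1ULL << 52)
#define KVM_DEV_ARM_VGIC_REG(cpu, reg, is64)				\
	(((__u64)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |		\
	 ((is64) ? KVM_DEV_ARM_VGIC_64BIT : 0) |			\
	 ((__u64)(reg) & KVM_DEV_ARM_VGIC_OFFSET_MASK))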

Kind regards,
Pavel Fedin
Expert Engineer
Samsung Electronics Research center Russia


Andre Przywara Sept. 25, 2015, 10:27 p.m. UTC | #4
On 24/09/15 13:08, Pavel Fedin wrote:
>  Hello!
> 
>> The only thing that is pure 64-bit is the MRS/MSR _instruction_ in
>> Aarch64, which always takes a x<nn> register.
>> So can you model the register size according to the spec and allow
>> 32-bit accesses from userland?
> 
>  I would like to complete the rework and respin v4, but this is, i guess, the only major issue left.
> Additionally, it impacts the API. So...
>  In order to allow 32-bit accesses we would have to drop using ARM64_SYS_REG() for building
> attribute ID and introduce something own, like KVM_DEV_ARM_VGIC_REG(). It will have different bits
> layout (actually it will be missing 'arch' and 'size' field, and instead i will use
> KVM_DEV_ARM_VGIC_64BIT flag for length specification, the same as for redistributor.
>  Will this be OK ?

No, instead you should go with your original approach ;-)
Thinking about that again, I see that this interface is of course modeled
after the architected GICv3 system registers, where AArch32 has its
own, separate encoding. So it's perfectly fine to use that 64-bit
interface between userland and KVM now. If we later get AArch32 support
for the GICv3, we can add the appropriate AArch32 sysregs to that
interface and have a natural match.

So: sorry for the noise, you can just go ahead with that native 64-bit
sysregs encoding for [SG]ET_ONE_REG as you had before.

Cheers,
Andre.

Peter Maydell Sept. 25, 2015, 10:33 p.m. UTC | #5
On 25 September 2015 at 15:27, Andre Przywara <andre.przywara@arm.com> wrote:
> On 24/09/15 13:08, Pavel Fedin wrote:
>>  Hello!
>>
>>> The only thing that is pure 64-bit is the MRS/MSR _instruction_ in
>>> Aarch64, which always takes a x<nn> register.
>>> So can you model the register size according to the spec and allow
>>> 32-bit accesses from userland?
>>
>>  I would like to complete the rework and respin v4, but this is, i guess, the only major issue left.
>> Additionally, it impacts the API. So...
>>  In order to allow 32-bit accesses we would have to drop using ARM64_SYS_REG() for building
>> attribute ID and introduce something own, like KVM_DEV_ARM_VGIC_REG(). It will have different bits
>> layout (actually it will be missing 'arch' and 'size' field, and instead i will use
>> KVM_DEV_ARM_VGIC_64BIT flag for length specification, the same as for redistributor.
>>  Will this be OK ?
>
> No, instead you should go with your original approach ;-)
> Thinking about that again I see that this interface is of course modeled
> after the architectured GICv3 system registers, where AArch32 has its
> own, separate encoding. So it's perfectly fine to use that 64-bit
> interface between userland and KVM now. If we later get Aarch32 support
> for the GICv3, we can add the appropriate Aarch32 sysregs to that
> interface and have a natural match.

Christoffer, Marc and I had a discussion about how best to handle GICv3
state save and restore here at Linaro Connect this week, and one of the
conclusions there was that all of it should be done via the device
fd's save/restore interface, not via the SET/GET_ONE_REG CPU register
interface. (Because it's much less confusing to have all of the GICv3's
state be dealt with via the same interface, rather than splitting it;
it's also more convenient for QEMU.)

More details to follow from one of us next week when we aren't all
on aeroplanes, I expect.

thanks
-- PMM
Marc Zyngier Sept. 25, 2015, 11 p.m. UTC | #6
On Fri, 25 Sep 2015 15:33:44 -0700
Peter Maydell <peter.maydell@linaro.org> wrote:

> On 25 September 2015 at 15:27, Andre Przywara <andre.przywara@arm.com> wrote:
> > On 24/09/15 13:08, Pavel Fedin wrote:
> >>  Hello!
> >>
> >>> The only thing that is pure 64-bit is the MRS/MSR _instruction_ in
> >>> Aarch64, which always takes a x<nn> register.
> >>> So can you model the register size according to the spec and allow
> >>> 32-bit accesses from userland?
> >>
> >>  I would like to complete the rework and respin v4, but this is, i guess, the only major issue left.
> >> Additionally, it impacts the API. So...
> >>  In order to allow 32-bit accesses we would have to drop using ARM64_SYS_REG() for building
> >> attribute ID and introduce something own, like KVM_DEV_ARM_VGIC_REG(). It will have different bits
> >> layout (actually it will be missing 'arch' and 'size' field, and instead i will use
> >> KVM_DEV_ARM_VGIC_64BIT flag for length specification, the same as for redistributor.
> >>  Will this be OK ?
> >
> > No, instead you should go with your original approach ;-)
> > Thinking about that again I see that this interface is of course modeled
> > after the architectured GICv3 system registers, where AArch32 has its
> > own, separate encoding. So it's perfectly fine to use that 64-bit
> > interface between userland and KVM now. If we later get Aarch32 support
> > for the GICv3, we can add the appropriate Aarch32 sysregs to that
> > interface and have a natural match.
> 
> Christoffer, Marc and I had a discussion about how best to handle GICv3
> state save and restore here at Linaro Connect this week, and one of the
> conclusions there was that all of it should be done via the device
> fd's save/restore interface, not via the SET/GET_ONE_REG CPU register
> interface. (Because it's much less confusing to have all of the GICv3's
> state be dealt with via the same interface, rather than splitting it;
> it's also more convenient for QEMU.)
> 
> More details to follow from one of us next week when we aren't all
> on aeroplanes, I expect.

Indeed. Another thing to consider now is (at the ITS level) the
necessity to be able to flush the ITS "caches" (basically our internal
data structures) out to guest memory so that it can be saved/restored.

That's pretty easy for the pending and property tables as well as
the command queue (where the format is fixed by the architecture), but
slightly more complicated with the device table, the collection table
and the various ITTs (we need to come up with a formal representation
that effectively becomes an ABI).

From a flow point of view, I expect the flush to at least occur when
the VM gets suspended, ensuring that the full interrupt state can be
read out via the userspace mapping of the guest memory.

That being said, I'm off to catch that plane.

Thanks,

	M.
Pavel Fedin Sept. 28, 2015, 3:29 p.m. UTC | #7
Hello!

> So: sorry for the noise, you can just go ahead with that native 64-bit
> sysregs encoding for [SG]ET_ONE_REG as you had before.

 OK, good. Take v4 then. Some issues you commented on were fixed; some other things were left as
they are (I replied to your comments explaining why). Let's move on. :)

Kind regards,
Pavel Fedin
Expert Engineer
Samsung Electronics Research center Russia



Patch

diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 03f640f..518b634 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -88,7 +88,7 @@  Groups:
     -ENODEV: Getting or setting this register is not yet supported
     -EBUSY: One or more VCPUs are running
 
-  KVM_DEV_ARM_VGIC_GRP_CPU_REGS
+  KVM_DEV_ARM_VGIC_GRP_CPU_REGS (vGICv2)
   Attributes:
     The attr field of kvm_device_attr encodes two values:
     bits:     | 63   ....  52 | 51 ..  32  |  31   ....    0 |
@@ -116,11 +116,45 @@  Groups:
 
   Limitations:
     - Priorities are not implemented, and registers are RAZ/WI
-    - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
   Errors:
     -ENODEV: Getting or setting this register is not yet supported
     -EBUSY: One or more VCPUs are running
 
+  KVM_DEV_ARM_VGIC_GRP_CPU_REGS (vGICv3)
+  Attributes:
+    The attr field of kvm_device_attr encodes the following values:
+    bits:   | 63 .. 56 | 55 .. 48 | 47 ... 40 | 39 .. 32 | 31 .. 0 |
+    values: |   arch   |   size   | reserved  |  cpu id  |  reg id |
+
+    All CPU interface regs are (rw, 64-bit). The only supported size value is
+    KVM_REG_SIZE_U64.
+
+    The arch, size and reg id fields together encode the system register to be
+    accessed. Normally these values are obtained using the ARM64_SYS_REG() macro.
+    Getting or setting such a register has the same effect as reading or
+    writing the register on the actual hardware.
+
+    The Active Priorities Registers AP0Rn and AP1Rn are implementation defined,
+    so we set a fixed format for our implementation that fits with the model of
+    a "GICv3 implementation without the security extensions" which we present
+    to the guest. This interface always exposes four register APR[0-3]
+    describing the maximum possible 128 preemption levels. The semantics of the
+    register indicates if any interrupts in a given preemption level are in the
+    active state by setting the corresponding bit.
+
+    Thus, preemption level X has one or more active interrupts if and only if:
+
+      APRn[X mod 32] == 0b1,  where n = X / 32
+
+    Bits for undefined preemption levels are RAZ/WI.
+
+  Limitations:
+    - Priorities are not implemented, and registers are RAZ/WI
+  Errors:
+    -ENODEV: Getting or setting this register is not yet supported
+    -EBUSY: One or more VCPUs are running
+
+
   KVM_DEV_ARM_VGIC_GRP_NR_IRQS
   Attributes:
     A value describing the number of interrupts (SGI, PPI and SPI) for
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 249954f..7d37ccd 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -201,6 +201,13 @@  struct kvm_arch_memory_slot {
 #define   KVM_DEV_ARM_VGIC_CPUID_MASK	(0xfffffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
 #define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
 #define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define   KVM_DEV_ARM_VGIC_REG_MASK	(KVM_REG_SIZE_MASK | \
+					 KVM_REG_ARM64_SYSREG_OP0_MASK | \
+					 KVM_REG_ARM64_SYSREG_OP1_MASK | \
+					 KVM_REG_ARM64_SYSREG_CRN_MASK | \
+					 KVM_REG_ARM64_SYSREG_CRM_MASK | \
+					 KVM_REG_ARM64_SYSREG_OP2_MASK)
+
 #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS	3
 #define KVM_DEV_ARM_VGIC_GRP_CTRL	4
 #define   KVM_DEV_ARM_VGIC_CTRL_INIT	0
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 9eeeb95..dbc5c49 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -259,8 +259,14 @@ 
 /*
  * CPU interface registers
  */
-#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << 1)
-#define ICC_CTLR_EL1_EOImode_drop	(1U << 1)
+#define ICC_CTLR_EL1_CBPR_SHIFT		0
+#define ICC_CTLR_EL1_EOImode_SHIFT	1
+#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_EOImode_drop	(1U << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_PRIbits_MASK	(7U << 8)
+#define ICC_CTLR_EL1_IDbits_MASK	(7U << 11)
+#define ICC_CTLR_EL1_SEIS		(1U << 14)
+#define ICC_CTLR_EL1_A3V		(1U << 15)
 #define ICC_SRE_EL1_SRE			(1U << 0)
 
 /*
@@ -285,6 +291,14 @@ 
 
 #define ICH_VMCR_CTLR_SHIFT		0
 #define ICH_VMCR_CTLR_MASK		(0x21f << ICH_VMCR_CTLR_SHIFT)
+#define ICH_VMCR_ENG0_SHIFT		0
+#define ICH_VMCR_ENG0			(1 << ICH_VMCR_ENG0_SHIFT)
+#define ICH_VMCR_ENG1_SHIFT		1
+#define ICH_VMCR_ENG1			(1 << ICH_VMCR_ENG1_SHIFT)
+#define ICH_VMCR_CBPR_SHIFT		4
+#define ICH_VMCR_CBPR			(1 << ICH_VMCR_CBPR_SHIFT)
+#define ICH_VMCR_EOIM_SHIFT		9
+#define ICH_VMCR_EOIM			(1 << ICH_VMCR_EOIM_SHIFT)
 #define ICH_VMCR_BPR1_SHIFT		18
 #define ICH_VMCR_BPR1_MASK		(7 << ICH_VMCR_BPR1_SHIFT)
 #define ICH_VMCR_BPR0_SHIFT		21
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index 8bda714..2f1c27b 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -48,6 +48,7 @@ 
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
 
+#include "sys_regs.h"
 #include "vgic.h"
 
 static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
@@ -991,6 +992,247 @@  void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
 		vgic_kick_vcpus(vcpu->kvm);
 }
 
+static bool access_gic_ctlr(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *r)
+{
+	u64 val;
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (p->is_write) {
+		val = *p->val;
+
+		vgicv3->vgic_vmcr &= ~(ICH_VMCR_CBPR|ICH_VMCR_EOIM);
+		vgicv3->vgic_vmcr |= (val << (ICH_VMCR_CBPR_SHIFT -
+						ICC_CTLR_EL1_CBPR_SHIFT)) &
+					ICH_VMCR_CBPR;
+		vgicv3->vgic_vmcr |= (val << (ICH_VMCR_EOIM_SHIFT -
+						ICC_CTLR_EL1_EOImode_SHIFT)) &
+					ICH_VMCR_EOIM;
+	} else {
+	asm volatile("mrs_s %0, " __stringify(ICC_CTLR_EL1)
+			     : "=r" (val));
+		val &= (ICC_CTLR_EL1_A3V | ICC_CTLR_EL1_SEIS |
+			ICC_CTLR_EL1_IDbits_MASK | ICC_CTLR_EL1_PRIbits_MASK);
+		val |= (vgicv3->vgic_vmcr & ICH_VMCR_CBPR) >>
+			(ICH_VMCR_CBPR_SHIFT - ICC_CTLR_EL1_CBPR_SHIFT);
+		val |= (vgicv3->vgic_vmcr & ICH_VMCR_EOIM) >>
+			(ICH_VMCR_EOIM_SHIFT - ICC_CTLR_EL1_EOImode_SHIFT);
+
+		*p->val = val;
+	}
+
+	return true;
+}
+
+static bool access_gic_pmr(struct kvm_vcpu *vcpu,
+			   const struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (p->is_write) {
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_PMR_MASK;
+		vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_PMR_SHIFT) &
+					ICH_VMCR_PMR_MASK;
+	} else {
+		*p->val = (vgicv3->vgic_vmcr & ICH_VMCR_PMR_MASK) >>
+			  ICH_VMCR_PMR_SHIFT;
+	}
+
+	return true;
+}
+
+static bool access_gic_bpr0(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *r)
+{
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (p->is_write) {
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_BPR0_MASK;
+		vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_BPR0_SHIFT) &
+				     ICH_VMCR_BPR0_MASK;
+	} else {
+		*p->val = (vgicv3->vgic_vmcr & ICH_VMCR_BPR0_MASK) >>
+			  ICH_VMCR_BPR0_SHIFT;
+	}
+
+	return true;
+}
+
+static bool access_gic_bpr1(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *r)
+{
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (p->is_write) {
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_BPR1_MASK;
+		vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_BPR1_SHIFT) &
+				     ICH_VMCR_BPR1_MASK;
+	} else {
+		*p->val = (vgicv3->vgic_vmcr & ICH_VMCR_BPR1_MASK) >>
+			  ICH_VMCR_BPR1_SHIFT;
+	}
+
+	return true;
+}
+
+static bool access_gic_grpen0(struct kvm_vcpu *vcpu,
+			      const struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (p->is_write) {
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_ENG0;
+		vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_ENG0_SHIFT) &
+				     ICH_VMCR_ENG0;
+	} else {
+		*p->val = (vgicv3->vgic_vmcr & ICH_VMCR_ENG0) >>
+			  ICH_VMCR_ENG0_SHIFT;
+	}
+
+	return true;
+}
+
+static bool access_gic_grpen1(struct kvm_vcpu *vcpu,
+			      const struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (p->is_write) {
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_ENG1;
+		vgicv3->vgic_vmcr |= (*p->val << ICH_VMCR_ENG1_SHIFT) &
+				     ICH_VMCR_ENG1;
+	} else {
+		*p->val = (vgicv3->vgic_vmcr & ICH_VMCR_ENG1) >>
+			  ICH_VMCR_ENG1_SHIFT;
+	}
+
+	return true;
+}
+
+static bool access_gic_ap0r(struct kvm_vcpu *vcpu,
+			      const struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+	u8 idx = r->Op2 & 3;
+
+	if (p->is_write)
+		vgicv3->vgic_ap0r[idx] = *p->val;
+	else
+		*p->val = vgicv3->vgic_ap0r[idx];
+
+	return true;
+}
+
+static bool access_gic_ap1r(struct kvm_vcpu *vcpu,
+			      const struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+	u8 idx = r->Op2 & 3;
+
+	if (p->is_write)
+		vgicv3->vgic_ap1r[idx] = *p->val;
+	else
+		*p->val = vgicv3->vgic_ap1r[idx];
+
+	return true;
+}
+
+static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
+	/* ICC_PMR_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b0100), CRm(0b0110), Op2(0b000),
+	  access_gic_pmr },
+	/* ICC_BPR0_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b011),
+	  access_gic_bpr0 },
+	/* ICC_AP0R0_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b100),
+	  access_gic_ap0r },
+	/* ICC_AP0R1_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b101),
+	  access_gic_ap0r },
+	/* ICC_AP0R2_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b110),
+	  access_gic_ap0r },
+	/* ICC_AP0R3_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b111),
+	  access_gic_ap0r },
+	/* ICC_AP1R0_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1001), Op2(0b000),
+	  access_gic_ap1r },
+	/* ICC_AP1R1_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1001), Op2(0b001),
+	  access_gic_ap1r },
+	/* ICC_AP1R2_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1001), Op2(0b010),
+	  access_gic_ap1r },
+	/* ICC_AP1R3_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1001), Op2(0b011),
+	  access_gic_ap1r },
+	/* ICC_BPR1_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b011),
+	  access_gic_bpr1 },
+	/* ICC_CTLR_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b100),
+	  access_gic_ctlr },
+	/* ICC_IGRPEN0_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b110),
+	  access_gic_grpen0 },
+	/* ICC_IGRPEN1_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b111),
+	  access_gic_grpen1 },
+};
+
+static int vgic_v3_cpu_regs_access(struct kvm_device *dev,
+				   struct kvm_device_attr *attr,
+				   bool is_write, int cpuid)
+{
+	u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+	u64 id = attr->attr & KVM_DEV_ARM_VGIC_REG_MASK;
+	struct kvm_vcpu *vcpu;
+	struct sys_reg_params params;
+	u_long reg;
+	const struct sys_reg_desc *r;
+
+	params.val = &reg;
+	params.is_write = is_write;
+	params.is_aarch32 = false;
+	params.is_32bit = false;
+
+	r = find_reg_by_id(id, &params, gic_v3_icc_reg_descs,
+			   ARRAY_SIZE(gic_v3_icc_reg_descs));
+	if (!r)
+		return -ENXIO;
+
+	if (is_write) {
+		if (get_user(reg, uaddr))
+			return -EFAULT;
+	}
+
+	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+		return -EINVAL;
+
+	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+	/* Ensure that VCPU is not running */
+	if (unlikely(vcpu->cpu != -1))
+			return -EBUSY;
+
+	if (!r->access(vcpu, &params, r))
+		return -EINVAL;
+
+	if (!is_write)
+		return put_user(reg, uaddr);
+
+	return 0;
+}
+
 static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 				    struct kvm_device_attr *attr,
 				    bool is_write)
@@ -1015,6 +1257,8 @@  static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 		mmio.phys_addr = vgic->vgic_redist_base;
 		ranges = vgic_redist_ranges;
 		break;
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		return vgic_v3_cpu_regs_access(dev, attr, is_write, cpuid);
 	default:
 		return -ENXIO;
 	}