diff mbox

[3/3] KVM: arm64: Implement accessors for vGIC CPU interface registers

Message ID 2857f7cad7c17109dfa3028f79af28893c0171ce.1440766141.git.p.fedin@samsung.com (mailing list archive)
State New, archived
Headers show

Commit Message

Pavel Fedin Aug. 28, 2015, 12:56 p.m. UTC
This commit adds accessors for all registers, being part of saved vGIC
context in the form of ICH_VMCR_EL2. This is necessary for enabling vGICv3
live migration.

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
---
 arch/arm64/kvm/sys_regs.c          | 176 +++++++++++++++++++++++++++++++++++++
 include/linux/irqchip/arm-gic-v3.h |  18 +++-
 2 files changed, 192 insertions(+), 2 deletions(-)

Comments

Christoffer Dall Aug. 30, 2015, 4:50 p.m. UTC | #1
On Fri, Aug 28, 2015 at 03:56:12PM +0300, Pavel Fedin wrote:
> This commit adds accessors for all registers, being part of saved vGIC
> context in the form of ICH_VMCR_EL2. This is necessary for enabling vGICv3
> live migration.
> 
> Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
> ---
>  arch/arm64/kvm/sys_regs.c          | 176 +++++++++++++++++++++++++++++++++++++
>  include/linux/irqchip/arm-gic-v3.h |  18 +++-
>  2 files changed, 192 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 8cc4a5e..7a4f982 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -23,6 +23,7 @@
>  #include <linux/kvm_host.h>
>  #include <linux/mm.h>
>  #include <linux/uaccess.h>
> +#include <linux/irqchip/arm-gic-v3.h>
>  
>  #include <asm/cacheflush.h>
>  #include <asm/cputype.h>
> @@ -136,6 +137,162 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
>  	return true;
>  }
>  
> +static bool access_gic_ctlr(struct kvm_vcpu *vcpu,
> +			    const struct sys_reg_params *p,
> +			    const struct sys_reg_desc *r)
> +{
> +	u64 val;
> +	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
> +		return false;
> +
> +	if (p->is_write) {
> +		val = *vcpu_reg(vcpu, p->Rt);
> +
> +		vgicv3->vgic_vmcr &= ~(ICH_VMCR_CBPR|ICH_VMCR_EOIM);
> +		vgicv3->vgic_vmcr |= (val << (ICH_VMCR_CBPR_SHIFT -
> +						ICC_CTLR_EL1_CBPR_SHIFT)) &
> +					ICH_VMCR_CBPR;
> +		vgicv3->vgic_vmcr |= (val << (ICH_VMCR_EOIM_SHIFT -
> +						ICC_CTLR_EL1_EOImode_SHIFT)) &
> +					ICH_VMCR_EOIM;
> +	} else {
> +		asm volatile("mrs_s %0," __stringify(ICC_IAR1_EL1)
> +			     : "=r" (val));
> +		val &= (ICC_CTLR_EL1_A3V | ICC_CTLR_EL1_SEIS |
> +			ICC_CTLR_EL1_IDbits_MASK | ICC_CTLR_EL1_PRIbits_MASK);
> +		val |= (vgicv3->vgic_vmcr & ICH_VMCR_CBPR) >>
> +			(ICH_VMCR_CBPR_SHIFT - ICC_CTLR_EL1_CBPR_SHIFT);
> +		val |= (vgicv3->vgic_vmcr & ICH_VMCR_EOIM) >>
> +			(ICH_VMCR_EOIM_SHIFT - ICC_CTLR_EL1_EOImode_SHIFT);
> +
> +		*vcpu_reg(vcpu, p->Rt) = val;
> +	}
> +
> +	return true;
> +}
> +
> +static bool access_gic_pmr(struct kvm_vcpu *vcpu,
> +			   const struct sys_reg_params *p,
> +			   const struct sys_reg_desc *r)
> +{
> +	u64 val;
> +	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
> +		return false;
> +
> +	if (p->is_write) {
> +		val = *vcpu_reg(vcpu, p->Rt);
> +		vgicv3->vgic_vmcr &= ~ICH_VMCR_PMR_MASK;
> +		vgicv3->vgic_vmcr |= (val << ICH_VMCR_PMR_SHIFT) &
> +					ICH_VMCR_PMR_MASK;
> +	} else {
> +		val = (vgicv3->vgic_vmcr & ICH_VMCR_PMR_MASK) >>
> +			ICH_VMCR_PMR_SHIFT;
> +		*vcpu_reg(vcpu, p->Rt) = val;
> +	}
> +
> +	return true;
> +}
> +
> +static bool access_gic_bpr0(struct kvm_vcpu *vcpu,
> +			    const struct sys_reg_params *p,
> +			    const struct sys_reg_desc *r)
> +{
> +	u64 val;
> +	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
> +		return false;
> +
> +	if (p->is_write) {
> +		val = *vcpu_reg(vcpu, p->Rt);
> +		vgicv3->vgic_vmcr &= ~ICH_VMCR_BPR0_MASK;
> +		vgicv3->vgic_vmcr |= (val << ICH_VMCR_BPR0_SHIFT) &
> +					ICH_VMCR_BPR0_MASK;
> +	} else {
> +		val = (vgicv3->vgic_vmcr & ICH_VMCR_BPR0_MASK) >>
> +			ICH_VMCR_BPR0_SHIFT;
> +		*vcpu_reg(vcpu, p->Rt) = val;
> +	}
> +
> +	return true;
> +}
> +
> +static bool access_gic_bpr1(struct kvm_vcpu *vcpu,
> +			    const struct sys_reg_params *p,
> +			    const struct sys_reg_desc *r)
> +{
> +	u64 val;
> +	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
> +		return false;
> +
> +	if (p->is_write) {
> +		val = *vcpu_reg(vcpu, p->Rt);
> +		vgicv3->vgic_vmcr &= ~ICH_VMCR_BPR1_MASK;
> +		vgicv3->vgic_vmcr |= (val << ICH_VMCR_BPR1_SHIFT) &
> +					ICH_VMCR_BPR1_MASK;
> +	} else {
> +		val = (vgicv3->vgic_vmcr & ICH_VMCR_BPR1_MASK) >>
> +			ICH_VMCR_BPR1_SHIFT;
> +		*vcpu_reg(vcpu, p->Rt) = val;
> +	}
> +
> +	return true;
> +}
> +
> +static bool access_gic_grpen0(struct kvm_vcpu *vcpu,
> +			      const struct sys_reg_params *p,
> +			      const struct sys_reg_desc *r)
> +{
> +	u64 val;
> +	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
> +		return false;
> +
> +	if (p->is_write) {
> +		val = *vcpu_reg(vcpu, p->Rt);
> +		vgicv3->vgic_vmcr &= ~ICH_VMCR_ENG0;
> +		vgicv3->vgic_vmcr |= (val << ICH_VMCR_ENG0_SHIFT) &
> +					ICH_VMCR_ENG0;
> +	} else {
> +		val = (vgicv3->vgic_vmcr & ICH_VMCR_ENG0) >>
> +			ICH_VMCR_ENG0_SHIFT;
> +		*vcpu_reg(vcpu, p->Rt) = val;
> +	}
> +
> +	return true;
> +}
> +
> +static bool access_gic_grpen1(struct kvm_vcpu *vcpu,
> +			      const struct sys_reg_params *p,
> +			      const struct sys_reg_desc *r)
> +{
> +	u64 val;
> +	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
> +
> +	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
> +		return false;
> +
> +	if (p->is_write) {
> +		val = *vcpu_reg(vcpu, p->Rt);
> +		vgicv3->vgic_vmcr &= ~ICH_VMCR_ENG1;
> +		vgicv3->vgic_vmcr |= (val << ICH_VMCR_ENG1_SHIFT) &
> +					ICH_VMCR_ENG1;
> +	} else {
> +		val = (vgicv3->vgic_vmcr & ICH_VMCR_ENG1) >>
> +			ICH_VMCR_ENG1_SHIFT;
> +		*vcpu_reg(vcpu, p->Rt) = val;
> +	}
> +
> +	return true;
> +}
> +
>  static bool trap_raz_wi(struct kvm_vcpu *vcpu,
>  			const struct sys_reg_params *p,
>  			const struct sys_reg_desc *r)
> @@ -579,6 +736,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
>  	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
>  	  access_vm_reg, reset_val, TCR_EL1, 0 },
>  
> +	/* ICC_PMR_EL1 */
> +	{ Op0(0b11), Op1(0b000), CRn(0b0100), CRm(0b0110), Op2(0b000),
> +	  access_gic_pmr },
> +
>  	/* AFSR0_EL1 */
>  	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
>  	  access_vm_reg, reset_unknown, AFSR0_EL1 },
> @@ -613,12 +774,27 @@ static const struct sys_reg_desc sys_reg_descs[] = {
>  	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
>  	  NULL, reset_val, VBAR_EL1, 0 },
>  
> +	/* ICC_BPR0_EL1 */
> +	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b011),
> +	  access_gic_bpr0 },
>  	/* ICC_SGI1R_EL1 */
>  	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
>  	  access_gic_sgi },
> +	/* ICC_BPR1_EL1 */
> +	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b011),
> +	  access_gic_bpr1 },
> +	/* ICC_CTLR_EL1 */
> +	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b100),
> +	  access_gic_ctlr },
>  	/* ICC_SRE_EL1 */
>  	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
>  	  trap_raz_wi },
> +	/* ICC_IGRPEN0_EL1 */
> +	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b110),
> +	  access_gic_grpen0 },
> +	/* ICC_GRPEN1_EL1 */
> +	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b111),
> +	  access_gic_grpen1 },
>  

I had imagined we would encode the GICv3 register accesses through the
device API and not through the system register API, since I'm not crazy
about polluting the general system register handling logic with GIC
registers solely for the purposes of migration.

I'd much rather have this logic locally to the gic files.

Have you thought about proper locking/serializing of access to the GIC
state in these accessor functions?  Seems like this deserves a comment
in the commit log at least and will probably be easier to understand in
the context of the vgic code.

-Christoffer
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Peter Maydell Aug. 30, 2015, 6:39 p.m. UTC | #2
On 30 August 2015 at 17:50, Christoffer Dall
<christoffer.dall@linaro.org> wrote:
> I had imagined we would encode the GICv3 register accesses through the
> device API and not through the system register API, since I'm not crazy
> about polluting the general system register handling logic with GIC
> registers solely for the purposes of migration.

There's an interesting design question lurking under this
about the extent to which you expose the h/w design split
between the CPU interface and the GIC proper as part of the
KVM APIs. I'm inclined to agree that it's better to for our
purposes treat both bits as just part of an irqchip device,
but I haven't given it a great deal of thought.

(Similarly in the QEMU emulated-GICv3 case you could also
split the CPU i/f more formally, or not. The kernel's choice
would have implications for which way QEMU ends up going,
I think.)

thanks
-- PMM
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Pavel Fedin Aug. 31, 2015, 7:43 a.m. UTC | #3
Hello!

> > I had imagined we would encode the GICv3 register accesses through the
> > device API and not through the system register API, since I'm not crazy
> > about polluting the general system register handling logic with GIC
> > registers solely for the purposes of migration.
> 
> There's an interesting design question lurking under this
> about the extent to which you expose the h/w design split
> between the CPU interface and the GIC proper as part of the
> KVM APIs.

 I could split up handling logic from access logic. So that in sys_regs.c we would have something like:

static bool access_gic_ctlr(struct kvm_vcpu *vcpu,
			    const struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return vgicv3_access_ctlr(vcpu, vcpu_reg(vcpu, p->Rt), p->is_write);
}

 And in vgic-v3-emul.c we would have the handler itself with the prototype:

bool vgicv3_access_ctlr(struct kvm_vcpu *vcpu, u64 *val, bool write);

 Would this be OK?
 In my personal opinion system register access API fits perfectly well for this task, because after all these are system registers. And implementing this as device attribute would, i guess, give no difference from code's point of view. We would have to encode system register numbers into attribute, then perform table lookup, actually duplicating our system register access code. Is it worth that?

Kind regards,
Pavel Fedin
Expert Engineer
Samsung Electronics Research center Russia


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Christoffer Dall Aug. 31, 2015, 9:01 a.m. UTC | #4
On Sun, Aug 30, 2015 at 07:39:05PM +0100, Peter Maydell wrote:
> On 30 August 2015 at 17:50, Christoffer Dall
> <christoffer.dall@linaro.org> wrote:
> > I had imagined we would encode the GICv3 register accesses through the
> > device API and not through the system register API, since I'm not crazy
> > about polluting the general system register handling logic with GIC
> > registers solely for the purposes of migration.
> 
> There's an interesting design question lurking under this
> about the extent to which you expose the h/w design split
> between the CPU interface and the GIC proper as part of the
> KVM APIs. I'm inclined to agree that it's better to for our
> purposes treat both bits as just part of an irqchip device,
> but I haven't given it a great deal of thought.

Me neither, and I intended to start a discussion around this with my
e-mail.

But as I stated above, I think it will be easier to manage if the state
belongs to the device, in general, but I have fairly little insight into
how this is going to be implemented in QEMU at this point.  But for the
GICv2 implementation, it certainly made a lot of sense to only deal with
one device and file descriptor when getting/setting the state.

> 
> (Similarly in the QEMU emulated-GICv3 case you could also
> split the CPU i/f more formally, or not. The kernel's choice
> would have implications for which way QEMU ends up going,
> I think.)
> 

Thanks,
-Christoffer
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Christoffer Dall Aug. 31, 2015, 9:03 a.m. UTC | #5
On Mon, Aug 31, 2015 at 10:43:27AM +0300, Pavel Fedin wrote:
>  Hello!
> 
> > > I had imagined we would encode the GICv3 register accesses through the
> > > device API and not through the system register API, since I'm not crazy
> > > about polluting the general system register handling logic with GIC
> > > registers solely for the purposes of migration.
> > 
> > There's an interesting design question lurking under this
> > about the extent to which you expose the h/w design split
> > between the CPU interface and the GIC proper as part of the
> > KVM APIs.
> 
>  I could split up handling logic from access logic. So that in sys_regs.c we would have something like:
> 
> static bool access_gic_ctlr(struct kvm_vcpu *vcpu,
> 			    const struct sys_reg_params *p,
> 			    const struct sys_reg_desc *r)
> {
> 	return vgicv3_access_ctlr(vcpu, vcpu_reg(vcpu, p->Rt), p->is_write);
> }
> 
>  And in vgic-v3-emul.c we would have the handler itself with the prototype:
> 
> bool vgicv3_access_ctlr(struct kvm_vcpu *vcpu, u64 *val, bool write);
> 
>  Would this be OK?
>  In my personal opinion system register access API fits perfectly well for this task, because after all these are system registers. And implementing this as device attribute would, i guess, give no difference from code's point of view. We would have to encode system register numbers into attribute, then perform table lookup, actually duplicating our system register access code. Does it worth that?
> 
I think it's worth moving the thing to device attributes, yes,
especially given that I never expect us to trap and emulate GICv3 system
register accesses from a guest in KVM.  Is that correct?

However, if there's a strong argument that this is really CPU state and
not state associated with the GIC device, I'd be willing to reconsider.

-Christoffer
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Pavel Fedin Aug. 31, 2015, 11:49 a.m. UTC | #6
Hello!

> I think it's worth moving the thing to device attributes, yes,
> especially given that I never expect us to trap and emulate GICv3 system
> register accesses from a guest in KVM.  Is that correct?

 Yes, but nevertheless, for GICv2 attributes we reuse the same code which is expected to trap MMIO
accesses from guest. And there we also have MMIO handlers for the CPU interface, which are also
never trapped from guest. So why cannot we do the same for GICv3 CPU interface, and simply reuse
existing APIs?
 I am currently working on full support in qemu, and it's not difficult to deal with CPU fd's.
Because anyway you have to iterate through all VCPUs in order to save state correctly.

Kind regards,
Pavel Fedin
Expert Engineer
Samsung Electronics Research center Russia


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Pavel Fedin Sept. 1, 2015, 1:09 p.m. UTC | #7
Hello!

> Have you thought about proper locking/serializing of access to the GIC
> state in these accessor functions?  

 I am in the process of rewriting the whole thing, and i came to this point.
 What kind of locking would you expect? It's a CPU interface, it does not affect state of any
other vCPUs. And, since i am getting/setting its registers, i assume that the vCPU is not running.
Well, i added the check. What next?

Kind regards,
Pavel Fedin
Expert Engineer
Samsung Electronics Research center Russia


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Christoffer Dall Sept. 1, 2015, 2:06 p.m. UTC | #8
On Tue, Sep 01, 2015 at 04:09:18PM +0300, Pavel Fedin wrote:
>  Hello!
> 
> > Have you thought about proper locking/serializing of access to the GIC
> > state in these accessor functions?  
> 
>  I am in the process of rewriting the whole thing, and i came to this point.
>  What kind of locking  would you expect ? It's a CPU interface, it does not affect state of any
> other vCPUs. And, since i am getting/setting its registers, i assume that the vCPU is not running.
> Well, i added the check. What next?
> 
I think we make some assumptions throughout the vgic code that only the
vcpu itself touches the state of the registers.  Maybe there is no need
for additional locking, but I'd sleep better at night if I knew that
whoever implemented save/restore logic had thought about concurrency.

-Christoffer
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 8cc4a5e..7a4f982 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -23,6 +23,7 @@ 
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
@@ -136,6 +137,162 @@  static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_gic_ctlr(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *r)
+{
+	u64 val;
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+		return false;
+
+	if (p->is_write) {
+		val = *vcpu_reg(vcpu, p->Rt);
+
+		vgicv3->vgic_vmcr &= ~(ICH_VMCR_CBPR|ICH_VMCR_EOIM);
+		vgicv3->vgic_vmcr |= (val << (ICH_VMCR_CBPR_SHIFT -
+						ICC_CTLR_EL1_CBPR_SHIFT)) &
+					ICH_VMCR_CBPR;
+		vgicv3->vgic_vmcr |= (val << (ICH_VMCR_EOIM_SHIFT -
+						ICC_CTLR_EL1_EOImode_SHIFT)) &
+					ICH_VMCR_EOIM;
+	} else {
+		asm volatile("mrs_s %0," __stringify(ICC_IAR1_EL1)
+			     : "=r" (val));
+		val &= (ICC_CTLR_EL1_A3V | ICC_CTLR_EL1_SEIS |
+			ICC_CTLR_EL1_IDbits_MASK | ICC_CTLR_EL1_PRIbits_MASK);
+		val |= (vgicv3->vgic_vmcr & ICH_VMCR_CBPR) >>
+			(ICH_VMCR_CBPR_SHIFT - ICC_CTLR_EL1_CBPR_SHIFT);
+		val |= (vgicv3->vgic_vmcr & ICH_VMCR_EOIM) >>
+			(ICH_VMCR_EOIM_SHIFT - ICC_CTLR_EL1_EOImode_SHIFT);
+
+		*vcpu_reg(vcpu, p->Rt) = val;
+	}
+
+	return true;
+}
+
+static bool access_gic_pmr(struct kvm_vcpu *vcpu,
+			   const struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 val;
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+		return false;
+
+	if (p->is_write) {
+		val = *vcpu_reg(vcpu, p->Rt);
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_PMR_MASK;
+		vgicv3->vgic_vmcr |= (val << ICH_VMCR_PMR_SHIFT) &
+					ICH_VMCR_PMR_MASK;
+	} else {
+		val = (vgicv3->vgic_vmcr & ICH_VMCR_PMR_MASK) >>
+			ICH_VMCR_PMR_SHIFT;
+		*vcpu_reg(vcpu, p->Rt) = val;
+	}
+
+	return true;
+}
+
+static bool access_gic_bpr0(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *r)
+{
+	u64 val;
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+		return false;
+
+	if (p->is_write) {
+		val = *vcpu_reg(vcpu, p->Rt);
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_BPR0_MASK;
+		vgicv3->vgic_vmcr |= (val << ICH_VMCR_BPR0_SHIFT) &
+					ICH_VMCR_BPR0_MASK;
+	} else {
+		val = (vgicv3->vgic_vmcr & ICH_VMCR_BPR0_MASK) >>
+			ICH_VMCR_BPR0_SHIFT;
+		*vcpu_reg(vcpu, p->Rt) = val;
+	}
+
+	return true;
+}
+
+static bool access_gic_bpr1(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *r)
+{
+	u64 val;
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+		return false;
+
+	if (p->is_write) {
+		val = *vcpu_reg(vcpu, p->Rt);
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_BPR1_MASK;
+		vgicv3->vgic_vmcr |= (val << ICH_VMCR_BPR1_SHIFT) &
+					ICH_VMCR_BPR1_MASK;
+	} else {
+		val = (vgicv3->vgic_vmcr & ICH_VMCR_BPR1_MASK) >>
+			ICH_VMCR_BPR1_SHIFT;
+		*vcpu_reg(vcpu, p->Rt) = val;
+	}
+
+	return true;
+}
+
+static bool access_gic_grpen0(struct kvm_vcpu *vcpu,
+			      const struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	u64 val;
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+		return false;
+
+	if (p->is_write) {
+		val = *vcpu_reg(vcpu, p->Rt);
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_ENG0;
+		vgicv3->vgic_vmcr |= (val << ICH_VMCR_ENG0_SHIFT) &
+					ICH_VMCR_ENG0;
+	} else {
+		val = (vgicv3->vgic_vmcr & ICH_VMCR_ENG0) >>
+			ICH_VMCR_ENG0_SHIFT;
+		*vcpu_reg(vcpu, p->Rt) = val;
+	}
+
+	return true;
+}
+
+static bool access_gic_grpen1(struct kvm_vcpu *vcpu,
+			      const struct sys_reg_params *p,
+			      const struct sys_reg_desc *r)
+{
+	u64 val;
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (vcpu->kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+		return false;
+
+	if (p->is_write) {
+		val = *vcpu_reg(vcpu, p->Rt);
+		vgicv3->vgic_vmcr &= ~ICH_VMCR_ENG1;
+		vgicv3->vgic_vmcr |= (val << ICH_VMCR_ENG1_SHIFT) &
+					ICH_VMCR_ENG1;
+	} else {
+		val = (vgicv3->vgic_vmcr & ICH_VMCR_ENG1) >>
+			ICH_VMCR_ENG1_SHIFT;
+		*vcpu_reg(vcpu, p->Rt) = val;
+	}
+
+	return true;
+}
+
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 			const struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
@@ -579,6 +736,10 @@  static const struct sys_reg_desc sys_reg_descs[] = {
 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
 	  access_vm_reg, reset_val, TCR_EL1, 0 },
 
+	/* ICC_PMR_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b0100), CRm(0b0110), Op2(0b000),
+	  access_gic_pmr },
+
 	/* AFSR0_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
 	  access_vm_reg, reset_unknown, AFSR0_EL1 },
@@ -613,12 +774,27 @@  static const struct sys_reg_desc sys_reg_descs[] = {
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
 	  NULL, reset_val, VBAR_EL1, 0 },
 
+	/* ICC_BPR0_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1000), Op2(0b011),
+	  access_gic_bpr0 },
 	/* ICC_SGI1R_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
 	  access_gic_sgi },
+	/* ICC_BPR1_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b011),
+	  access_gic_bpr1 },
+	/* ICC_CTLR_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b100),
+	  access_gic_ctlr },
 	/* ICC_SRE_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
 	  trap_raz_wi },
+	/* ICC_IGRPEN0_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b110),
+	  access_gic_grpen0 },
+	/* ICC_GRPEN1_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b111),
+	  access_gic_grpen1 },
 
 	/* CONTEXTIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index ed0fc9f..7e9fc16 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -257,8 +257,14 @@ 
 /*
  * CPU interface registers
  */
-#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << 1)
-#define ICC_CTLR_EL1_EOImode_drop	(1U << 1)
+#define ICC_CTLR_EL1_CBPR_SHIFT		0
+#define ICC_CTLR_EL1_EOImode_SHIFT	1
+#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_EOImode_drop	(1U << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_PRIbits_MASK	(7U << 8)
+#define ICC_CTLR_EL1_IDbits_MASK	(7U << 11)
+#define ICC_CTLR_EL1_SEIS		(1U << 14)
+#define ICC_CTLR_EL1_A3V		(1U << 15)
 #define ICC_SRE_EL1_SRE			(1U << 0)
 
 /*
@@ -283,6 +289,14 @@ 
 
 #define ICH_VMCR_CTLR_SHIFT		0
 #define ICH_VMCR_CTLR_MASK		(0x21f << ICH_VMCR_CTLR_SHIFT)
+#define ICH_VMCR_ENG0_SHIFT		0
+#define ICH_VMCR_ENG0			(1 << ICH_VMCR_ENG0_SHIFT)
+#define ICH_VMCR_ENG1_SHIFT		1
+#define ICH_VMCR_ENG1			(1 << ICH_VMCR_ENG1_SHIFT)
+#define ICH_VMCR_CBPR_SHIFT		4
+#define ICH_VMCR_CBPR			(1 << ICH_VMCR_CBPR_SHIFT)
+#define ICH_VMCR_EOIM_SHIFT		9
+#define ICH_VMCR_EOIM			(1 << ICH_VMCR_EOIM_SHIFT)
 #define ICH_VMCR_BPR1_SHIFT		18
 #define ICH_VMCR_BPR1_MASK		(7 << ICH_VMCR_BPR1_SHIFT)
 #define ICH_VMCR_BPR0_SHIFT		21