
[v2,2/4] KVM: arm/arm64: vgic-v3: Add core support for Group0 SGIs

Message ID 20180810101422.3569-3-marc.zyngier@arm.com (mailing list archive)
State New, archived
Series KVM: arm/arm64: vgic-v3: Group0 SGI support

Commit Message

Marc Zyngier Aug. 10, 2018, 10:14 a.m. UTC
Although vgic-v3 now supports Group0 interrupts, it still doesn't
deal with Group0 SGIs. As usual with the GIC, nothing is simple:

- ICC_SGI1R can signal SGIs of both groups, since GICD_CTLR.DS==1
  with KVM (as per 8.1.10, Non-secure EL1 access)

- ICC_SGI0R can only generate Group0 SGIs

- ICC_ASGI1R sees its scope refocussed to generate only Group0
  SGIs (as per the note at the bottom of Table 8-14)

We only support Group1 SGIs so far, so no material change.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm/kvm/coproc.c            |  2 +-
 arch/arm64/kvm/sys_regs.c        |  2 +-
 include/kvm/arm_vgic.h           |  2 +-
 virt/kvm/arm/vgic/vgic-mmio-v3.c | 19 +++++++++++++++----
 4 files changed, 18 insertions(+), 7 deletions(-)
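
The three cases listed in the commit message reduce to a single predicate on
(SGI group, accessed register). The standalone sketch below is purely
illustrative (sgi_delivered() and main() are not kernel code); it simply
models the "!irq->group || allow_group1" check this patch adds to
vgic_v3_dispatch_sgi():

#include <stdbool.h>
#include <stdio.h>

/*
 * allow_group1 is true for an ICC_SGI1R access, false for
 * ICC_SGI0R/ICC_ASGI1R; irq_is_group1 reflects irq->group.
 */
static bool sgi_delivered(bool irq_is_group1, bool allow_group1)
{
	/* A Group0 access only generates Group0 SGIs; a Group1 access
	 * (with GICD_CTLR.DS==1) can generate SGIs of either group. */
	return !irq_is_group1 || allow_group1;
}

int main(void)
{
	printf("Group0 SGI via ICC_SGI1R:  %d\n", sgi_delivered(false, true));  /* 1 */
	printf("Group1 SGI via ICC_SGI1R:  %d\n", sgi_delivered(true,  true));  /* 1 */
	printf("Group0 SGI via ICC_SGI0R:  %d\n", sgi_delivered(false, false)); /* 1 */
	printf("Group1 SGI via ICC_ASGI1R: %d\n", sgi_delivered(true,  false)); /* 0 */
	return 0;
}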

Comments

Christoffer Dall Aug. 11, 2018, 4:22 p.m. UTC | #1
On Fri, Aug 10, 2018 at 11:14:20AM +0100, Marc Zyngier wrote:
> Although vgic-v3 now supports Group0 interrupts, it still doesn't
> deal with Group0 SGIs. As usually with the GIC, nothing is simple:
> 
> - ICC_SGI1R can signal SGIs of both groups, since GICD_CTLR.DS==1
>   with KVM (as per 8.1.10, Non-secure EL1 access)
> 
> - ICC_SGI0R can only generate Group0 SGIs
> 
> - ICC_ASGI1R sees its scope refocussed to generate only Group0
>   SGIs (as per the note at the bottom of Table 8-14)
> 
> We only support Group1 SGIs so far, so no material change.
> 
> Reviewed-by: Eric Auger <eric.auger@redhat.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  arch/arm/kvm/coproc.c            |  2 +-
>  arch/arm64/kvm/sys_regs.c        |  2 +-
>  include/kvm/arm_vgic.h           |  2 +-
>  virt/kvm/arm/vgic/vgic-mmio-v3.c | 19 +++++++++++++++----
>  4 files changed, 18 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
> index 3a02e76699a6..b17c52608a19 100644
> --- a/arch/arm/kvm/coproc.c
> +++ b/arch/arm/kvm/coproc.c
> @@ -253,7 +253,7 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
>  	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
>  	reg |= *vcpu_reg(vcpu, p->Rt1) ;
>  
> -	vgic_v3_dispatch_sgi(vcpu, reg);
> +	vgic_v3_dispatch_sgi(vcpu, reg, true);
>  
>  	return true;
>  }
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index e04aacb2a24c..aba6755c816d 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -255,7 +255,7 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
>  	if (!p->is_write)
>  		return read_from_write_only(vcpu, p, r);
>  
> -	vgic_v3_dispatch_sgi(vcpu, p->regval);
> +	vgic_v3_dispatch_sgi(vcpu, p->regval, true);
>  
>  	return true;
>  }
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index c134790be32c..4f31f96bbfab 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -373,7 +373,7 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
>  void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
>  void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
>  
> -void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
> +void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
>  
>  /**
>   * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
> diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
> index 88e78b582139..9482b4cbdd4f 100644
> --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
> +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
> @@ -900,7 +900,8 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
>  /**
>   * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
>   * @vcpu: The VCPU requesting a SGI
> - * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
> + * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
> + * @allow_group1: Does the sysreg access access allow generation of G0 SGIs

nit: access access

>   *
>   * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
>   * This will trap in sys_regs.c and call this function.
> @@ -910,7 +911,7 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
>   * check for matching ones. If this bit is set, we signal all, but not the
>   * calling VCPU.
>   */
> -void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
> +void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
>  {
>  	struct kvm *kvm = vcpu->kvm;
>  	struct kvm_vcpu *c_vcpu;
> @@ -959,9 +960,19 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
>  		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
>  
>  		spin_lock_irqsave(&irq->irq_lock, flags);
> -		irq->pending_latch = true;
>  
> -		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
> +		/*
> +		 * An access targetting Group0 SGIs can only generate
> +		 * those, while an access targetting Group1 SGIs can
> +		 * generate interrupts of either group.
> +		 */
> +		if (!irq->group || allow_group1) {
> +			irq->pending_latch = true;
> +			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
> +		} else {
> +			spin_unlock_irqrestore(&irq->irq_lock, flags);
> +		}
> +
>  		vgic_put_irq(vcpu->kvm, irq);
>  	}
>  }
> -- 
> 2.18.0
> 

Despite the nit (which you can fix locally):

Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>

Patch

diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 3a02e76699a6..b17c52608a19 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -253,7 +253,7 @@  static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
 	reg |= *vcpu_reg(vcpu, p->Rt1) ;
 
-	vgic_v3_dispatch_sgi(vcpu, reg);
+	vgic_v3_dispatch_sgi(vcpu, reg, true);
 
 	return true;
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e04aacb2a24c..aba6755c816d 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -255,7 +255,7 @@  static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p, r);
 
-	vgic_v3_dispatch_sgi(vcpu, p->regval);
+	vgic_v3_dispatch_sgi(vcpu, p->regval, true);
 
 	return true;
 }
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index c134790be32c..4f31f96bbfab 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -373,7 +373,7 @@  void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
 
-void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
 
 /**
  * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 88e78b582139..9482b4cbdd4f 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -900,7 +900,8 @@  static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
 /**
  * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
  * @vcpu: The VCPU requesting a SGI
- * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
+ * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
+ * @allow_group1: Does the sysreg access allow generation of G0 SGIs
  *
  * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
  * This will trap in sys_regs.c and call this function.
@@ -910,7 +911,7 @@  static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
  * check for matching ones. If this bit is set, we signal all, but not the
  * calling VCPU.
  */
-void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *c_vcpu;
@@ -959,9 +960,19 @@  void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
 		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
 
 		spin_lock_irqsave(&irq->irq_lock, flags);
-		irq->pending_latch = true;
 
-		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		/*
+		 * An access targeting Group0 SGIs can only generate
+		 * those, while an access targeting Group1 SGIs can
+		 * generate interrupts of either group.
+		 */
+		if (!irq->group || allow_group1) {
+			irq->pending_latch = true;
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		} else {
+			spin_unlock_irqrestore(&irq->irq_lock, flags);
+		}
+
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
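
As the commit message notes, all existing callers keep passing true, so the
new allow_group1 parameter only makes a difference once the Group0 SGI
registers are actually trapped. A minimal sketch of such a handler is shown
below; the name access_gic_sgi0 and the exact wiring are assumptions about a
later patch in this series, not part of this one. It follows the same shape
as the access_gic_sgi() handler in the hunk above:

static bool access_gic_sgi0(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* ICC_SGI0R_EL1/ICC_ASGI1R_EL1 may only generate Group0 SGIs */
	vgic_v3_dispatch_sgi(vcpu, p->regval, false);

	return true;
}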