[RFC,v2,01/22] ARM: vGIC: introduce and initialize pending_irq lock

Message ID 20170721200010.29010-2-andre.przywara@arm.com (mailing list archive)
State New, archived

Commit Message

Andre Przywara July 21, 2017, 7:59 p.m. UTC
Currently we protect the pending_irq structure with the corresponding
VGIC VCPU lock. There are problems in certain corner cases (for
instance if an IRQ is migrating), so let's introduce a per-IRQ lock,
which will protect the consistency of this structure independently of
any VCPU.
For now this just introduces and initializes the lock; it also adds
wrapper macros to simplify its usage (and help debugging).

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 xen/arch/arm/vgic.c        |  1 +
 xen/include/asm-arm/vgic.h | 11 +++++++++++
 2 files changed, 12 insertions(+)
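
As a rough usage sketch (illustration only, not part of the series; it
assumes the pre-existing "priority" and "status" members of struct
pending_irq and the GIC_IRQ_GUEST_VISIBLE status bit), code that needs a
consistent view of several members of the structure would do:

    static void example_sync_priority(struct pending_irq *p, uint8_t prio)
    {
        unsigned long flags;

        vgic_irq_lock(p, flags);
        /* p->status and p->priority can now be related consistently. */
        if ( !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
            p->priority = prio;
        vgic_irq_unlock(p, flags);
    }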

Comments

Julien Grall Aug. 10, 2017, 3:19 p.m. UTC | #1
Hi Andre,

On 21/07/17 20:59, Andre Przywara wrote:
> Currently we protect the pending_irq structure with the corresponding
> VGIC VCPU lock. There are problems in certain corner cases (for
> instance if an IRQ is migrating), so let's introduce a per-IRQ lock,
> which will protect the consistency of this structure independently of
> any VCPU.
> For now this just introduces and initializes the lock; it also adds
> wrapper macros to simplify its usage (and help debugging).
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  xen/arch/arm/vgic.c        |  1 +
>  xen/include/asm-arm/vgic.h | 11 +++++++++++
>  2 files changed, 12 insertions(+)
>
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 1e5107b..38dacd3 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -69,6 +69,7 @@ void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
>      memset(p, 0, sizeof(*p));
>      INIT_LIST_HEAD(&p->inflight);
>      INIT_LIST_HEAD(&p->lr_queue);
> +    spin_lock_init(&p->lock);
>      p->irq = virq;
>      p->lpi_vcpu_id = INVALID_VCPU_ID;
>  }
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index d4ed23d..1c38b9a 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -90,6 +90,14 @@ struct pending_irq
>       * TODO: when implementing irq migration, taking only the current
>       * vgic lock is not going to be enough. */
>      struct list_head lr_queue;
> +    /* The lock protects the consistency of this structure. A single status bit
> +     * can be read and/or set without holding the lock using the atomic
> +     * set_bit/clear_bit/test_bit functions, however accessing multiple bits or
> +     * relating to other members in this struct requires the lock.
> +     * The list_head members are protected by their corresponding VCPU lock,
> +     * it is not sufficient to hold this pending_irq lock here to query or
> +     * change list order or affiliation. */

Coding style:

/*
 * Foo
 * Bar
 */

Cheers,
Julien Grall Aug. 10, 2017, 3:35 p.m. UTC | #2
Hi,

On 21/07/17 20:59, Andre Przywara wrote:
> Currently we protect the pending_irq structure with the corresponding
> VGIC VCPU lock. There are problems in certain corner cases (for
> instance if an IRQ is migrating), so let's introduce a per-IRQ lock,
> which will protect the consistency of this structure independently of
> any VCPU.
> For now this just introduces and initializes the lock; it also adds
> wrapper macros to simplify its usage (and help debugging).
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  xen/arch/arm/vgic.c        |  1 +
>  xen/include/asm-arm/vgic.h | 11 +++++++++++
>  2 files changed, 12 insertions(+)
>
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 1e5107b..38dacd3 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -69,6 +69,7 @@ void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
>      memset(p, 0, sizeof(*p));
>      INIT_LIST_HEAD(&p->inflight);
>      INIT_LIST_HEAD(&p->lr_queue);
> +    spin_lock_init(&p->lock);
>      p->irq = virq;
>      p->lpi_vcpu_id = INVALID_VCPU_ID;
>  }
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index d4ed23d..1c38b9a 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -90,6 +90,14 @@ struct pending_irq
>       * TODO: when implementing irq migration, taking only the current
>       * vgic lock is not going to be enough. */
>      struct list_head lr_queue;
> +    /* The lock protects the consistency of this structure. A single status bit
> +     * can be read and/or set without holding the lock using the atomic
> +     * set_bit/clear_bit/test_bit functions, however accessing multiple bits or
> +     * relating to other members in this struct requires the lock.
> +     * The list_head members are protected by their corresponding VCPU lock,
> +     * it is not sufficient to hold this pending_irq lock here to query or
> +     * change list order or affiliation. */

Actually, I have one question here. Is the vCPU lock sufficient to
protect the list_head members? Or do you also mandate the pending_irq
lock to be taken as well?

Also, it would be good to have the locking order documented, maybe in
docs/misc?

Cheers,
Andre Przywara Aug. 16, 2017, 4:27 p.m. UTC | #3
Hi,

On 10/08/17 16:35, Julien Grall wrote:
> Hi,
> 
> On 21/07/17 20:59, Andre Przywara wrote:
>> Currently we protect the pending_irq structure with the corresponding
>> VGIC VCPU lock. There are problems in certain corner cases (for
>> instance if an IRQ is migrating), so let's introduce a per-IRQ lock,
>> which will protect the consistency of this structure independently of
>> any VCPU.
>> For now this just introduces and initializes the lock; it also adds
>> wrapper macros to simplify its usage (and help debugging).
>>
>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>> ---
>>  xen/arch/arm/vgic.c        |  1 +
>>  xen/include/asm-arm/vgic.h | 11 +++++++++++
>>  2 files changed, 12 insertions(+)
>>
>> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
>> index 1e5107b..38dacd3 100644
>> --- a/xen/arch/arm/vgic.c
>> +++ b/xen/arch/arm/vgic.c
>> @@ -69,6 +69,7 @@ void vgic_init_pending_irq(struct pending_irq *p,
>> unsigned int virq)
>>      memset(p, 0, sizeof(*p));
>>      INIT_LIST_HEAD(&p->inflight);
>>      INIT_LIST_HEAD(&p->lr_queue);
>> +    spin_lock_init(&p->lock);
>>      p->irq = virq;
>>      p->lpi_vcpu_id = INVALID_VCPU_ID;
>>  }
>> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
>> index d4ed23d..1c38b9a 100644
>> --- a/xen/include/asm-arm/vgic.h
>> +++ b/xen/include/asm-arm/vgic.h
>> @@ -90,6 +90,14 @@ struct pending_irq
>>       * TODO: when implementing irq migration, taking only the current
>>       * vgic lock is not going to be enough. */
>>      struct list_head lr_queue;
>> +    /* The lock protects the consistency of this structure. A single
>> status bit
>> +     * can be read and/or set without holding the lock using the atomic
>> +     * set_bit/clear_bit/test_bit functions, however accessing
>> multiple bits or
>> +     * relating to other members in this struct requires the lock.
>> +     * The list_head members are protected by their corresponding
>> VCPU lock,
>> +     * it is not sufficient to hold this pending_irq lock here to
>> query or
>> +     * change list order or affiliation. */
> 
> Actually, I have one question here. Is the vCPU lock sufficient to
> protect the list_head members? Or do you also mandate the pending_irq
> lock to be taken as well?

For *manipulating* a list (removing or adding a pending_irq) you need to
hold both locks. We need the VCPU lock because the list head in struct
vcpu could change, and we need the per-IRQ lock to prevent a pending_irq
from being inserted into two lists at the same time (and because the
list_head member variables themselves are changed).
However, just *checking* whether a certain pending_irq is a member of a
list works while holding only the per-IRQ lock.
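
Expressed as code, the rule would look roughly like this (a sketch only;
the example functions are made up, while the vgic_irq_lock/unlock
wrappers and the VCPU lock in v->arch.vgic.lock are the real ones):

    /* Manipulating a list: both locks, VCPU lock first. */
    static void example_remove_from_lrq(struct vcpu *v, struct pending_irq *p)
    {
        unsigned long vcpu_flags, irq_flags;

        spin_lock_irqsave(&v->arch.vgic.lock, vcpu_flags);
        vgic_irq_lock(p, irq_flags);

        list_del_init(&p->lr_queue);

        vgic_irq_unlock(p, irq_flags);
        spin_unlock_irqrestore(&v->arch.vgic.lock, vcpu_flags);
    }

    /* Merely checking membership: the per-IRQ lock alone is enough. */
    static bool example_is_queued(struct pending_irq *p)
    {
        unsigned long flags;
        bool queued;

        vgic_irq_lock(p, flags);
        queued = !list_empty(&p->lr_queue);
        vgic_irq_unlock(p, flags);

        return queued;
    }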

> Also, it would be good to have the locking order documented, maybe in
> docs/misc?

Yes, I agree that having a high-level VGIC document (focusing on the
locking to begin with) is a good idea.

Cheers,
Andre.

Julien Grall Aug. 16, 2017, 4:35 p.m. UTC | #4
On 16/08/17 17:27, Andre Przywara wrote:
> Hi,
>
> On 10/08/17 16:35, Julien Grall wrote:
>> Hi,
>>
>> On 21/07/17 20:59, Andre Przywara wrote:
>>> Currently we protect the pending_irq structure with the corresponding
>>> VGIC VCPU lock. There are problems in certain corner cases (for
>>> instance if an IRQ is migrating), so let's introduce a per-IRQ lock,
>>> which will protect the consistency of this structure independently of
>>> any VCPU.
>>> For now this just introduces and initializes the lock; it also adds
>>> wrapper macros to simplify its usage (and help debugging).
>>>
>>> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
>>> ---
>>>  xen/arch/arm/vgic.c        |  1 +
>>>  xen/include/asm-arm/vgic.h | 11 +++++++++++
>>>  2 files changed, 12 insertions(+)
>>>
>>> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
>>> index 1e5107b..38dacd3 100644
>>> --- a/xen/arch/arm/vgic.c
>>> +++ b/xen/arch/arm/vgic.c
>>> @@ -69,6 +69,7 @@ void vgic_init_pending_irq(struct pending_irq *p,
>>> unsigned int virq)
>>>      memset(p, 0, sizeof(*p));
>>>      INIT_LIST_HEAD(&p->inflight);
>>>      INIT_LIST_HEAD(&p->lr_queue);
>>> +    spin_lock_init(&p->lock);
>>>      p->irq = virq;
>>>      p->lpi_vcpu_id = INVALID_VCPU_ID;
>>>  }
>>> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
>>> index d4ed23d..1c38b9a 100644
>>> --- a/xen/include/asm-arm/vgic.h
>>> +++ b/xen/include/asm-arm/vgic.h
>>> @@ -90,6 +90,14 @@ struct pending_irq
>>>       * TODO: when implementing irq migration, taking only the current
>>>       * vgic lock is not going to be enough. */
>>>      struct list_head lr_queue;
>>> +    /* The lock protects the consistency of this structure. A single
>>> status bit
>>> +     * can be read and/or set without holding the lock using the atomic
>>> +     * set_bit/clear_bit/test_bit functions, however accessing
>>> multiple bits or
>>> +     * relating to other members in this struct requires the lock.
>>> +     * The list_head members are protected by their corresponding
>>> VCPU lock,
>>> +     * it is not sufficient to hold this pending_irq lock here to
>>> query or
>>> +     * change list order or affiliation. */
>>
>> Actually, I have one question here. Is the vCPU lock sufficient to
>> protect the list_head members? Or do you also mandate the pending_irq
>> lock to be taken as well?
>
> For *manipulating* a list (removing or adding a pending_irq) you need to
> hold both locks. We need the VCPU lock because the list head in struct
> vcpu could change, and we need the per-IRQ lock to prevent a pending_irq
> from being inserted into two lists at the same time (and because the
> list_head member variables themselves are changed).
> However, just *checking* whether a certain pending_irq is a member of a
> list works while holding only the per-IRQ lock.

This does not seem to be in line with the description above. It says "it
is not sufficient to hold this pending_irq lock here to query...".

Also, there are a few places not taking both locks when updating the
lists. This is at least the case for:
	- vgic_clear_pending_irqs
	- gic_clear_pending_irqs
	- its_discard_event

So something has to be done to bring the code in line with the description.

Cheers,

Patch

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 1e5107b..38dacd3 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -69,6 +69,7 @@  void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
     memset(p, 0, sizeof(*p));
     INIT_LIST_HEAD(&p->inflight);
     INIT_LIST_HEAD(&p->lr_queue);
+    spin_lock_init(&p->lock);
     p->irq = virq;
     p->lpi_vcpu_id = INVALID_VCPU_ID;
 }
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index d4ed23d..1c38b9a 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -90,6 +90,14 @@  struct pending_irq
      * TODO: when implementing irq migration, taking only the current
      * vgic lock is not going to be enough. */
     struct list_head lr_queue;
+    /* The lock protects the consistency of this structure. A single status bit
+     * can be read and/or set without holding the lock using the atomic
+     * set_bit/clear_bit/test_bit functions, however accessing multiple bits or
+     * relating to other members in this struct requires the lock.
+     * The list_head members are protected by their corresponding VCPU lock,
+     * it is not sufficient to hold this pending_irq lock here to query or
+     * change list order or affiliation. */
+    spinlock_t lock;
 };
 
 #define NR_INTERRUPT_PER_RANK   32
@@ -156,6 +164,9 @@  struct vgic_ops {
 #define vgic_lock(v)   spin_lock_irq(&(v)->domain->arch.vgic.lock)
 #define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)
 
+#define vgic_irq_lock(p, flags) spin_lock_irqsave(&(p)->lock, flags)
+#define vgic_irq_unlock(p, flags) spin_unlock_irqrestore(&(p)->lock, flags)
+
 #define vgic_lock_rank(v, r, flags)   spin_lock_irqsave(&(r)->lock, flags)
 #define vgic_unlock_rank(v, r, flags) spin_unlock_irqrestore(&(r)->lock, flags)
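
To make the locking rule in the comment above concrete, here is a sketch
(illustration only, not part of the patch; it assumes the pre-existing
GIC_IRQ_GUEST_* status bits and the gic_raise_guest_irq() helper):

    static void example_status_rules(struct vcpu *v, struct pending_irq *p)
    {
        unsigned long flags;

        /* A single status bit may be set/cleared/tested without the lock: */
        set_bit(GIC_IRQ_GUEST_QUEUED, &p->status);

        /* Relating several bits, or a bit and another member, needs it: */
        vgic_irq_lock(p, flags);
        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
            gic_raise_guest_irq(v, p->irq, p->priority);
        vgic_irq_unlock(p, flags);
    }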