[v3,5/6] xen/rcu: add assertions to debug build

Message ID 20200304063212.20843-6-jgross@suse.com
State New, archived
Series [v3,1/6] xen/rcu: use rcu softirq for forcing quiescent state

Commit Message

Jürgen Groß March 4, 2020, 6:32 a.m. UTC
Xen's RCU implementation relies on no softirq handling taking place
while inside an RCU critical section. Add ASSERT()s in debug builds
in order to catch any violations.

For that purpose, modify rcu_read_[un]lock() to use a dedicated percpu
counter instead of preempt_[en|dis]able(), as this makes it possible
to test that condition in __do_softirq() (ASSERT_NOT_IN_ATOMIC() is
not usable there because __cpu_up() calls process_pending_softirqs()
while holding the cpu hotplug lock).

Dropping the now unneeded #include of preempt.h from rcupdate.h
requires adding it to some sources.

While at it, switch the rcu_read_[un]lock() implementation from macros
to static inline functions.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V3:
- add barriers to rcu_[en|dis]able() (Roger Pau Monné)
- add rcu_quiesce_allowed() to ASSERT_NOT_IN_ATOMIC (Roger Pau Monné)
- convert macros to static inline functions
- add sanity check in rcu_read_unlock()
---
 xen/common/multicall.c     |  1 +
 xen/common/preempt.c       |  5 ++++-
 xen/common/rcupdate.c      |  4 ++++
 xen/common/softirq.c       |  2 ++
 xen/common/wait.c          |  1 +
 xen/include/xen/rcupdate.h | 45 +++++++++++++++++++++++++++++++++++++++++----
 6 files changed, 53 insertions(+), 5 deletions(-)
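
For illustration, the kind of violation the new assertions are meant to
catch looks like this (a hypothetical caller sketch, not part of the
patch; some_read_lock is made up for the example, and it assumes
do_softirq() reaches __do_softirq() with rcu_allowed set):

    /*
     * Hypothetical sketch: processing softirqs while inside an RCU
     * read-side critical section now trips the new ASSERT() in
     * __do_softirq().
     */
    rcu_read_lock(&some_read_lock);    /* rcu_lock_cnt: 0 -> 1 */
    do_softirq();                      /* ASSERT(rcu_quiesce_allowed()) fails */
    rcu_read_unlock(&some_read_lock);  /* rcu_lock_cnt: 1 -> 0 */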

Comments

Julien Grall March 4, 2020, 1:42 p.m. UTC | #1
Hi,

On 04/03/2020 06:32, Juergen Gross wrote:
> diff --git a/xen/include/xen/rcupdate.h b/xen/include/xen/rcupdate.h
> index 31c8b86d13..9f6d420898 100644
> --- a/xen/include/xen/rcupdate.h
> +++ b/xen/include/xen/rcupdate.h
> @@ -34,10 +34,40 @@
>   #include <xen/cache.h>
>   #include <xen/spinlock.h>
>   #include <xen/cpumask.h>
> -#include <xen/preempt.h>
> +#include <xen/percpu.h>
> +#include <asm/atomic.h>
>   
>   #define __rcu
>   
> +#ifndef NDEBUG
> +DECLARE_PER_CPU(unsigned int, rcu_lock_cnt);
> +
> +static inline void rcu_quiesce_disable(void)
> +{
> +    this_cpu(rcu_lock_cnt)++;
> +    arch_lock_acquire_barrier();

I am not sure I understand the goal of this barrier. What are you
trying to protect against?

Cheers,
Jürgen Groß March 6, 2020, 2:35 p.m. UTC | #2
On 04.03.20 14:42, Julien Grall wrote:
> Hi,
> 
> On 04/03/2020 06:32, Juergen Gross wrote:
>> diff --git a/xen/include/xen/rcupdate.h b/xen/include/xen/rcupdate.h
>> index 31c8b86d13..9f6d420898 100644
>> --- a/xen/include/xen/rcupdate.h
>> +++ b/xen/include/xen/rcupdate.h
>> @@ -34,10 +34,40 @@
>>   #include <xen/cache.h>
>>   #include <xen/spinlock.h>
>>   #include <xen/cpumask.h>
>> -#include <xen/preempt.h>
>> +#include <xen/percpu.h>
>> +#include <asm/atomic.h>
>>   #define __rcu
>> +#ifndef NDEBUG
>> +DECLARE_PER_CPU(unsigned int, rcu_lock_cnt);
>> +
>> +static inline void rcu_quiesce_disable(void)
>> +{
>> +    this_cpu(rcu_lock_cnt)++;
>> +    arch_lock_acquire_barrier();
> 
> I am not sure I understand the goal of this barrier. What are you 
> trying to protect against?

This is the result of a request by Roger, which seemed reasonable,
although I should have checked the suggested barrier type more
thoroughly.

He suggested adding barriers as in the former preempt_[en|dis]able()
cases, but using acquire and release barriers as in locks.

Thinking more about it, I think a simple barrier() should do the trick,
as only CPU-local protection is needed.


Juergen
Julien Grall March 6, 2020, 4:08 p.m. UTC | #3
Hi,

On 06/03/2020 14:35, Jürgen Groß wrote:
> On 04.03.20 14:42, Julien Grall wrote:
>> Hi,
>>
>> On 04/03/2020 06:32, Juergen Gross wrote:
>>> diff --git a/xen/include/xen/rcupdate.h b/xen/include/xen/rcupdate.h
>>> index 31c8b86d13..9f6d420898 100644
>>> --- a/xen/include/xen/rcupdate.h
>>> +++ b/xen/include/xen/rcupdate.h
>>> @@ -34,10 +34,40 @@
>>>   #include <xen/cache.h>
>>>   #include <xen/spinlock.h>
>>>   #include <xen/cpumask.h>
>>> -#include <xen/preempt.h>
>>> +#include <xen/percpu.h>
>>> +#include <asm/atomic.h>
>>>   #define __rcu
>>> +#ifndef NDEBUG
>>> +DECLARE_PER_CPU(unsigned int, rcu_lock_cnt);
>>> +
>>> +static inline void rcu_quiesce_disable(void)
>>> +{
>>> +    this_cpu(rcu_lock_cnt)++;
>>> +    arch_lock_acquire_barrier();
>>
>> I am not sure I understand the goal of this barrier. What are you 
>> trying to protect against?
> 
> This is the result of a request by Roger, which seemed reasonable,
> although I should have checked the suggested barrier type more
> thoroughly.
> 
> He suggested adding barriers as in the former preempt_[en|dis]able()
> cases, but using acquire and release barriers as in locks.

I have CCed Roger as I don't understand why you would want memory 
ordering with all the CPUs on Arm.

> 
> Thinking more about it, I think a simple barrier() should do the trick,
> as only CPU-local protection is needed.

Note that on Arm barrier() is only a compiler barrier. It does not 
prevent the CPU from re-ordering memory accesses. But I think barrier() 
ought to be fine in this case (although I am not 100% sure).

Cheers,
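
For illustration, a minimal sketch of the barrier() variant discussed
above (an assumed follow-up shape, not part of this v3 patch; as noted,
barrier() is only a compiler barrier, which should suffice here because
the counter only guards against softirq processing on the local CPU):

    #ifndef NDEBUG
    DECLARE_PER_CPU(unsigned int, rcu_lock_cnt);

    static inline void rcu_quiesce_disable(void)
    {
        this_cpu(rcu_lock_cnt)++;
        barrier();   /* keep the increment ahead of the critical section */
    }

    static inline void rcu_quiesce_enable(void)
    {
        barrier();   /* keep the critical section ahead of the decrement */
        this_cpu(rcu_lock_cnt)--;
    }
    #endif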

Patch

diff --git a/xen/common/multicall.c b/xen/common/multicall.c
index 5a199ebf8f..67f1a23485 100644
--- a/xen/common/multicall.c
+++ b/xen/common/multicall.c
@@ -10,6 +10,7 @@ 
 #include <xen/multicall.h>
 #include <xen/guest_access.h>
 #include <xen/perfc.h>
+#include <xen/preempt.h>
 #include <xen/trace.h>
 #include <asm/current.h>
 #include <asm/hardirq.h>
diff --git a/xen/common/preempt.c b/xen/common/preempt.c
index 3b4178fd44..8a351e644b 100644
--- a/xen/common/preempt.c
+++ b/xen/common/preempt.c
@@ -21,13 +21,15 @@ 
 
 #include <xen/preempt.h>
 #include <xen/irq.h>
+#include <xen/rcupdate.h>
 #include <asm/system.h>
 
 DEFINE_PER_CPU(unsigned int, __preempt_count);
 
 bool_t in_atomic(void)
 {
-    return preempt_count() || in_irq() || !local_irq_is_enabled();
+    return preempt_count() || in_irq() || !local_irq_is_enabled() ||
+           !rcu_quiesce_allowed();
 }
 
 #ifndef NDEBUG
@@ -36,5 +38,6 @@  void ASSERT_NOT_IN_ATOMIC(void)
     ASSERT(!preempt_count());
     ASSERT(!in_irq());
     ASSERT(local_irq_is_enabled());
+    ASSERT(rcu_quiesce_allowed());
 }
 #endif
diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c
index 27d597bbeb..d1cc2f0a98 100644
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -46,6 +46,10 @@ 
 #include <xen/cpu.h>
 #include <xen/stop_machine.h>
 
+#ifndef NDEBUG
+DEFINE_PER_CPU(unsigned int, rcu_lock_cnt);
+#endif
+
 /* Global control variables for rcupdate callback mechanism. */
 static struct rcu_ctrlblk {
     long cur;           /* Current batch number.                      */
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index 30beb27ae9..fd90b8511d 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -30,6 +30,8 @@  static void __do_softirq(unsigned long ignore_mask, bool rcu_allowed)
     unsigned int i, cpu;
     unsigned long pending;
 
+    ASSERT(!rcu_allowed || rcu_quiesce_allowed());
+
     for ( ; ; )
     {
         /*
diff --git a/xen/common/wait.c b/xen/common/wait.c
index 24716e7676..9cdb174036 100644
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -19,6 +19,7 @@ 
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <xen/preempt.h>
 #include <xen/sched.h>
 #include <xen/softirq.h>
 #include <xen/wait.h>
diff --git a/xen/include/xen/rcupdate.h b/xen/include/xen/rcupdate.h
index 31c8b86d13..9f6d420898 100644
--- a/xen/include/xen/rcupdate.h
+++ b/xen/include/xen/rcupdate.h
@@ -34,10 +34,40 @@ 
 #include <xen/cache.h>
 #include <xen/spinlock.h>
 #include <xen/cpumask.h>
-#include <xen/preempt.h>
+#include <xen/percpu.h>
+#include <asm/atomic.h>
 
 #define __rcu
 
+#ifndef NDEBUG
+DECLARE_PER_CPU(unsigned int, rcu_lock_cnt);
+
+static inline void rcu_quiesce_disable(void)
+{
+    this_cpu(rcu_lock_cnt)++;
+    arch_lock_acquire_barrier();
+}
+
+static inline void rcu_quiesce_enable(void)
+{
+    arch_lock_release_barrier();
+    this_cpu(rcu_lock_cnt)--;
+}
+
+static inline bool rcu_quiesce_allowed(void)
+{
+    return !this_cpu(rcu_lock_cnt);
+}
+
+#else
+static inline void rcu_quiesce_disable(void) { }
+static inline void rcu_quiesce_enable(void) { }
+static inline bool rcu_quiesce_allowed(void)
+{
+    return true;
+}
+#endif
+
 /**
  * struct rcu_head - callback structure for use with RCU
  * @next: next update requests in a list
@@ -91,16 +121,23 @@  typedef struct _rcu_read_lock rcu_read_lock_t;
  * will be deferred until the outermost RCU read-side critical section
  * completes.
  *
- * It is illegal to block while in an RCU read-side critical section.
+ * It is illegal to process softirqs while in an RCU read-side critical section.
  */
-#define rcu_read_lock(x)       ({ ((void)(x)); preempt_disable(); })
+static inline void rcu_read_lock(rcu_read_lock_t *lock)
+{
+    rcu_quiesce_disable();
+}
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
-#define rcu_read_unlock(x)     ({ ((void)(x)); preempt_enable(); })
+static inline void rcu_read_unlock(rcu_read_lock_t *lock)
+{
+    ASSERT(!rcu_quiesce_allowed());
+    rcu_quiesce_enable();
+}
 
 /*
  * So where is rcu_write_lock()?  It does not exist, as there is no