[v2,4/8] seqlock, kcsan: Add annotations for KCSAN

Message ID 20191017141305.146193-5-elver@google.com (mailing list archive)
State New, archived
Series Add Kernel Concurrency Sanitizer (KCSAN)

Commit Message

Marco Elver Oct. 17, 2019, 2:13 p.m. UTC
Since seqlocks in the Linux kernel do not require the use of marked
atomic accesses in critical sections, we teach KCSAN to assume such
accesses are atomic. KCSAN also pretends that writes to `sequence` are
atomic, even though plain writes are currently used (their corresponding
reads are READ_ONCE()).

Further, to avoid false positives when there is no clear end to a
seqlock reader critical section (which only happens when using the raw
interface), KCSAN assumes that a fixed number of accesses after the
start of a seqlock critical section are atomic.
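
For context, the typical balanced pattern this refers to looks roughly
as follows (a minimal sketch; struct foo and its fields are illustrative
only, not taken from the kernel):

  struct foo {
          seqcount_t seq;
          int a, b;
  };

  /* Reader: plain loads inside the read-side critical section; KCSAN
   * should not report these as racing with the writer. */
  static void foo_read(struct foo *f, int *a, int *b)
  {
          unsigned seq;

          do {
                  seq = read_seqcount_begin(&f->seq);
                  *a = f->a;
                  *b = f->b;
          } while (read_seqcount_retry(&f->seq, seq));
  }

  /* Writer: `sequence` is incremented with plain writes. */
  static void foo_write(struct foo *f, int a, int b)
  {
          write_seqcount_begin(&f->seq);
          f->a = a;
          f->b = b;
          write_seqcount_end(&f->seq);
  }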

Signed-off-by: Marco Elver <elver@google.com>
---
 include/linux/seqlock.h | 44 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 40 insertions(+), 4 deletions(-)

Comments

Mark Rutland Oct. 24, 2019, 12:28 p.m. UTC | #1
On Thu, Oct 17, 2019 at 04:13:01PM +0200, Marco Elver wrote:
> Since seqlocks in the Linux kernel do not require the use of marked
> atomic accesses in critical sections, we teach KCSAN to assume such
> accesses are atomic. KCSAN currently also pretends that writes to
> `sequence` are atomic, although currently plain writes are used (their
> corresponding reads are READ_ONCE).
> 
> Further, to avoid false positives in the absence of clear ending of a
> seqlock reader critical section (only when using the raw interface),
> KCSAN assumes a fixed number of accesses after start of a seqlock
> critical section are atomic.

Do we have many examples where there's not a clear end to a seqlock
sequence? Or are there just a handful?

If there aren't that many, I wonder if we can make it mandatory to have
an explicit end, or to add some helper for those patterns so that we can
reliably hook them.

Thanks,
Mark.

> 
> Signed-off-by: Marco Elver <elver@google.com>
> ---
>  include/linux/seqlock.h | 44 +++++++++++++++++++++++++++++++++++++----
>  1 file changed, 40 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
> index bcf4cf26b8c8..1e425831a7ed 100644
> --- a/include/linux/seqlock.h
> +++ b/include/linux/seqlock.h
> @@ -37,8 +37,24 @@
>  #include <linux/preempt.h>
>  #include <linux/lockdep.h>
>  #include <linux/compiler.h>
> +#include <linux/kcsan.h>
>  #include <asm/processor.h>
>  
> +/*
> + * The seqlock interface does not prescribe a precise sequence of read
> + * begin/retry/end. For readers, typically there is a call to
> + * read_seqcount_begin() and read_seqcount_retry(), however, there are more
> + * esoteric cases which do not follow this pattern.
> + *
> + * As a consequence, we take the following best-effort approach for *raw* usage
> + * of seqlocks under KCSAN: upon beginning a seq-reader critical section,
> + * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
> + * atomics; if there is a matching read_seqcount_retry() call, no following
> + * memory operations are considered atomic. Non-raw usage of seqlocks is not
> + * affected.
> + */
> +#define KCSAN_SEQLOCK_REGION_MAX 1000
> +
>  /*
>   * Version using sequence counter only.
>   * This can be used when code has its own mutex protecting the
> @@ -115,6 +131,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
>  		cpu_relax();
>  		goto repeat;
>  	}
> +	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
>  	return ret;
>  }
>  
> @@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
>  {
>  	unsigned ret = READ_ONCE(s->sequence);
>  	smp_rmb();
> +	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
>  	return ret;
>  }
>  
> @@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
>  {
>  	unsigned ret = READ_ONCE(s->sequence);
>  	smp_rmb();
> +	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
>  	return ret & ~1;
>  }
>  
> @@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
>   */
>  static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
>  {
> -	return unlikely(s->sequence != start);
> +	kcsan_atomic_next(0);
> +	return unlikely(READ_ONCE(s->sequence) != start);
>  }
>  
>  /**
> @@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
>  
>  static inline void raw_write_seqcount_begin(seqcount_t *s)
>  {
> +	kcsan_begin_atomic(true);
>  	s->sequence++;
>  	smp_wmb();
>  }
> @@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
>  {
>  	smp_wmb();
>  	s->sequence++;
> +	kcsan_end_atomic(true);
>  }
>  
>  /**
> @@ -262,18 +284,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
>   *
>   *      void write(void)
>   *      {
> - *              Y = true;
> + *              WRITE_ONCE(Y, true);
>   *
>   *              raw_write_seqcount_barrier(seq);
>   *
> - *              X = false;
> + *              WRITE_ONCE(X, false);
>   *      }
>   */
>  static inline void raw_write_seqcount_barrier(seqcount_t *s)
>  {
> +	kcsan_begin_atomic(true);
>  	s->sequence++;
>  	smp_wmb();
>  	s->sequence++;
> +	kcsan_end_atomic(true);
>  }
>  
>  static inline int raw_read_seqcount_latch(seqcount_t *s)
> @@ -398,7 +422,9 @@ static inline void write_seqcount_end(seqcount_t *s)
>  static inline void write_seqcount_invalidate(seqcount_t *s)
>  {
>  	smp_wmb();
> +	kcsan_begin_atomic(true);
>  	s->sequence+=2;
> +	kcsan_end_atomic(true);
>  }
>  
>  typedef struct {
> @@ -430,11 +456,21 @@ typedef struct {
>   */
>  static inline unsigned read_seqbegin(const seqlock_t *sl)
>  {
> -	return read_seqcount_begin(&sl->seqcount);
> +	unsigned ret = read_seqcount_begin(&sl->seqcount);
> +
> +	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry */
> +	kcsan_begin_atomic(false);
> +	return ret;
>  }
>  
>  static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
>  {
> +	/*
> +	 * Assume not nested: read_seqretry may be called multiple times when
> +	 * completing read critical section.
> +	 */
> +	kcsan_end_atomic(false);
> +
>  	return read_seqcount_retry(&sl->seqcount, start);
>  }
>  
> -- 
> 2.23.0.866.gb869b98d4c-goog
>
Marco Elver Oct. 24, 2019, 2:17 p.m. UTC | #2
On Thu, 24 Oct 2019 at 14:28, Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Thu, Oct 17, 2019 at 04:13:01PM +0200, Marco Elver wrote:
> > Since seqlocks in the Linux kernel do not require the use of marked
> > atomic accesses in critical sections, we teach KCSAN to assume such
> > accesses are atomic. KCSAN currently also pretends that writes to
> > `sequence` are atomic, although currently plain writes are used (their
> > corresponding reads are READ_ONCE).
> >
> > Further, to avoid false positives in the absence of clear ending of a
> > seqlock reader critical section (only when using the raw interface),
> > KCSAN assumes a fixed number of accesses after start of a seqlock
> > critical section are atomic.
>
> Do we have many examples where there's not a clear end to a seqlock
> sequence? Or are there just a handful?
>
> If there aren't that many, I wonder if we can make it mandatory to have
> an explicit end, or to add some helper for those patterns so that we can
> reliably hook them.

In an ideal world, all usage of seqlocks would be via seqlock_t, which
follows a somewhat saner pattern where we already have normal begin/end
markings -- with the subtle exception of readers that need to be flat
atomic regions, e.g. because of usage like this:
- fs/namespace.c:__legitimize_mnt - unbalanced read_seqretry
- fs/dcache.c:d_walk - unbalanced need_seqretry

But anything directly accessing seqcount_t seems to be unpredictable.
Filtering for usages of read_seqcount_retry that do not follow the
'do { .. } while (read_seqcount_retry(..));' pattern (although even the
ones in while loops aren't necessarily predictable):

$ git grep 'read_seqcount_retry' | grep -Ev 'seqlock.h|Doc|\* ' | grep -v 'while ('
=> about 1/3 of the total read_seqcount_retry usage.

Just looking at fs/namei.c, I would conclude that it'd be a pretty
daunting task to prescribe and migrate to an interface that forces
clear begin/end.
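
For illustration, the kind of usage with no single begin/end scope might
look roughly like this (a purely hypothetical sketch, not taken from any
particular file; assume struct foo embeds a seqcount_t seq plus fields a
and b):

  /* The snapshot is taken in one function... */
  static unsigned foo_snapshot(struct foo *f, int *a, int *b)
  {
          unsigned seq = raw_read_seqcount(&f->seq);

          *a = f->a;      /* plain loads that must not be reported */
          *b = f->b;
          return seq;
  }

  /* ...and validated (perhaps) much later, in a different function,
   * so there is no single scope KCSAN could hook as the region end. */
  static bool foo_snapshot_retry(struct foo *f, unsigned seq)
  {
          return __read_seqcount_retry(&f->seq, seq);
  }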

Which is why I concluded that for now, it is probably better to make
KCSAN play well with the existing code.

Thanks,
-- Marco

> Thanks,
> Mark.
>
> >
> > Signed-off-by: Marco Elver <elver@google.com>
> > ---
> >  include/linux/seqlock.h | 44 +++++++++++++++++++++++++++++++++++++----
> >  1 file changed, 40 insertions(+), 4 deletions(-)
> >
> > diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
> > index bcf4cf26b8c8..1e425831a7ed 100644
> > --- a/include/linux/seqlock.h
> > +++ b/include/linux/seqlock.h
> > @@ -37,8 +37,24 @@
> >  #include <linux/preempt.h>
> >  #include <linux/lockdep.h>
> >  #include <linux/compiler.h>
> > +#include <linux/kcsan.h>
> >  #include <asm/processor.h>
> >
> > +/*
> > + * The seqlock interface does not prescribe a precise sequence of read
> > + * begin/retry/end. For readers, typically there is a call to
> > + * read_seqcount_begin() and read_seqcount_retry(), however, there are more
> > + * esoteric cases which do not follow this pattern.
> > + *
> > + * As a consequence, we take the following best-effort approach for *raw* usage
> > + * of seqlocks under KCSAN: upon beginning a seq-reader critical section,
> > + * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
> > + * atomics; if there is a matching read_seqcount_retry() call, no following
> > + * memory operations are considered atomic. Non-raw usage of seqlocks is not
> > + * affected.
> > + */
> > +#define KCSAN_SEQLOCK_REGION_MAX 1000
> > +
> >  /*
> >   * Version using sequence counter only.
> >   * This can be used when code has its own mutex protecting the
> > @@ -115,6 +131,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
> >               cpu_relax();
> >               goto repeat;
> >       }
> > +     kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
> >       return ret;
> >  }
> >
> > @@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
> >  {
> >       unsigned ret = READ_ONCE(s->sequence);
> >       smp_rmb();
> > +     kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
> >       return ret;
> >  }
> >
> > @@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
> >  {
> >       unsigned ret = READ_ONCE(s->sequence);
> >       smp_rmb();
> > +     kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
> >       return ret & ~1;
> >  }
> >
> > @@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
> >   */
> >  static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
> >  {
> > -     return unlikely(s->sequence != start);
> > +     kcsan_atomic_next(0);
> > +     return unlikely(READ_ONCE(s->sequence) != start);
> >  }
> >
> >  /**
> > @@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
> >
> >  static inline void raw_write_seqcount_begin(seqcount_t *s)
> >  {
> > +     kcsan_begin_atomic(true);
> >       s->sequence++;
> >       smp_wmb();
> >  }
> > @@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
> >  {
> >       smp_wmb();
> >       s->sequence++;
> > +     kcsan_end_atomic(true);
> >  }
> >
> >  /**
> > @@ -262,18 +284,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
> >   *
> >   *      void write(void)
> >   *      {
> > - *              Y = true;
> > + *              WRITE_ONCE(Y, true);
> >   *
> >   *              raw_write_seqcount_barrier(seq);
> >   *
> > - *              X = false;
> > + *              WRITE_ONCE(X, false);
> >   *      }
> >   */
> >  static inline void raw_write_seqcount_barrier(seqcount_t *s)
> >  {
> > +     kcsan_begin_atomic(true);
> >       s->sequence++;
> >       smp_wmb();
> >       s->sequence++;
> > +     kcsan_end_atomic(true);
> >  }
> >
> >  static inline int raw_read_seqcount_latch(seqcount_t *s)
> > @@ -398,7 +422,9 @@ static inline void write_seqcount_end(seqcount_t *s)
> >  static inline void write_seqcount_invalidate(seqcount_t *s)
> >  {
> >       smp_wmb();
> > +     kcsan_begin_atomic(true);
> >       s->sequence+=2;
> > +     kcsan_end_atomic(true);
> >  }
> >
> >  typedef struct {
> > @@ -430,11 +456,21 @@ typedef struct {
> >   */
> >  static inline unsigned read_seqbegin(const seqlock_t *sl)
> >  {
> > -     return read_seqcount_begin(&sl->seqcount);
> > +     unsigned ret = read_seqcount_begin(&sl->seqcount);
> > +
> > +     kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry */
> > +     kcsan_begin_atomic(false);
> > +     return ret;
> >  }
> >
> >  static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
> >  {
> > +     /*
> > +      * Assume not nested: read_seqretry may be called multiple times when
> > +      * completing read critical section.
> > +      */
> > +     kcsan_end_atomic(false);
> > +
> >       return read_seqcount_retry(&sl->seqcount, start);
> >  }
> >
> > --
> > 2.23.0.866.gb869b98d4c-goog
> >
Mark Rutland Oct. 24, 2019, 4:35 p.m. UTC | #3
On Thu, Oct 24, 2019 at 04:17:11PM +0200, Marco Elver wrote:
> On Thu, 24 Oct 2019 at 14:28, Mark Rutland <mark.rutland@arm.com> wrote:
> >
> > On Thu, Oct 17, 2019 at 04:13:01PM +0200, Marco Elver wrote:
> > > Since seqlocks in the Linux kernel do not require the use of marked
> > > atomic accesses in critical sections, we teach KCSAN to assume such
> > > accesses are atomic. KCSAN currently also pretends that writes to
> > > `sequence` are atomic, although currently plain writes are used (their
> > > corresponding reads are READ_ONCE).
> > >
> > > Further, to avoid false positives in the absence of clear ending of a
> > > seqlock reader critical section (only when using the raw interface),
> > > KCSAN assumes a fixed number of accesses after start of a seqlock
> > > critical section are atomic.
> >
> > Do we have many examples where there's not a clear end to a seqlock
> > sequence? Or are there just a handful?
> >
> > If there aren't that many, I wonder if we can make it mandatory to have
> > an explicit end, or to add some helper for those patterns so that we can
> > reliably hook them.
> 
> In an ideal world, all usage of seqlocks would be via seqlock_t, which
> follows a somewhat saner usage, where we already do normal begin/end
> markings -- with subtle exception to readers needing to be flat atomic
> regions, e.g. because usage like this:
> - fs/namespace.c:__legitimize_mnt - unbalanced read_seqretry
> - fs/dcache.c:d_walk - unbalanced need_seqretry
> 
> But anything directly accessing seqcount_t seems to be unpredictable.
> Filtering for usage of read_seqcount_retry not following 'do { .. }
> while (read_seqcount_retry(..));' (although even the ones in while
> loops aren't necessarily predictable):
> 
> $ git grep 'read_seqcount_retry' | grep -Ev 'seqlock.h|Doc|\* ' | grep
> -v 'while ('
> => about 1/3 of the total read_seqcount_retry usage.
> 
> Just looking at fs/namei.c, I would conclude that it'd be a pretty
> daunting task to prescribe and migrate to an interface that forces
> clear begin/end.
> 
> Which is why I concluded that for now, it is probably better to make
> KCSAN play well with the existing code.

Thanks for the detailed explanation, it's very helpful.

That all sounds reasonable to me -- could you fold some of that into the
commit message?

Thanks,
Mark.
Marco Elver Oct. 24, 2019, 5:09 p.m. UTC | #4
On Thu, 24 Oct 2019 at 18:35, Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Thu, Oct 24, 2019 at 04:17:11PM +0200, Marco Elver wrote:
> > On Thu, 24 Oct 2019 at 14:28, Mark Rutland <mark.rutland@arm.com> wrote:
> > >
> > > On Thu, Oct 17, 2019 at 04:13:01PM +0200, Marco Elver wrote:
> > > > Since seqlocks in the Linux kernel do not require the use of marked
> > > > atomic accesses in critical sections, we teach KCSAN to assume such
> > > > accesses are atomic. KCSAN currently also pretends that writes to
> > > > `sequence` are atomic, although currently plain writes are used (their
> > > > corresponding reads are READ_ONCE).
> > > >
> > > > Further, to avoid false positives in the absence of clear ending of a
> > > > seqlock reader critical section (only when using the raw interface),
> > > > KCSAN assumes a fixed number of accesses after start of a seqlock
> > > > critical section are atomic.
> > >
> > > Do we have many examples where there's not a clear end to a seqlock
> > > sequence? Or are there just a handful?
> > >
> > > If there aren't that many, I wonder if we can make it mandatory to have
> > > an explicit end, or to add some helper for those patterns so that we can
> > > reliably hook them.
> >
> > In an ideal world, all usage of seqlocks would be via seqlock_t, which
> > follows a somewhat saner usage, where we already do normal begin/end
> > markings -- with subtle exception to readers needing to be flat atomic
> > regions, e.g. because usage like this:
> > - fs/namespace.c:__legitimize_mnt - unbalanced read_seqretry
> > - fs/dcache.c:d_walk - unbalanced need_seqretry
> >
> > But anything directly accessing seqcount_t seems to be unpredictable.
> > Filtering for usage of read_seqcount_retry not following 'do { .. }
> > while (read_seqcount_retry(..));' (although even the ones in while
> > loops aren't necessarily predictable):
> >
> > $ git grep 'read_seqcount_retry' | grep -Ev 'seqlock.h|Doc|\* ' | grep
> > -v 'while ('
> > => about 1/3 of the total read_seqcount_retry usage.
> >
> > Just looking at fs/namei.c, I would conclude that it'd be a pretty
> > daunting task to prescribe and migrate to an interface that forces
> > clear begin/end.
> >
> > Which is why I concluded that for now, it is probably better to make
> > KCSAN play well with the existing code.
>
> Thanks for the detailed explanation, it's very helpful.
>
> That all sounds reasonable to me -- could you fold some of that into the
> commit message?

Thanks, will do. (I hope to have v3 ready by some time next week.)

-- Marco

> Thanks,
> Mark.

Patch

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index bcf4cf26b8c8..1e425831a7ed 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -37,8 +37,24 @@ 
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/compiler.h>
+#include <linux/kcsan.h>
 #include <asm/processor.h>
 
+/*
+ * The seqlock interface does not prescribe a precise sequence of read
+ * begin/retry/end. For readers, typically there is a call to
+ * read_seqcount_begin() and read_seqcount_retry(), however, there are more
+ * esoteric cases which do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for *raw* usage
+ * of seqlocks under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Non-raw usage of seqlocks is not
+ * affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
@@ -115,6 +131,7 @@  static inline unsigned __read_seqcount_begin(const seqcount_t *s)
 		cpu_relax();
 		goto repeat;
 	}
+	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
 	return ret;
 }
 
@@ -131,6 +148,7 @@  static inline unsigned raw_read_seqcount(const seqcount_t *s)
 {
 	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
+	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
 	return ret;
 }
 
@@ -183,6 +201,7 @@  static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
+	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
 	return ret & ~1;
 }
 
@@ -202,7 +221,8 @@  static inline unsigned raw_seqcount_begin(const seqcount_t *s)
  */
 static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
-	return unlikely(s->sequence != start);
+	kcsan_atomic_next(0);
+	return unlikely(READ_ONCE(s->sequence) != start);
 }
 
 /**
@@ -225,6 +245,7 @@  static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 
 static inline void raw_write_seqcount_begin(seqcount_t *s)
 {
+	kcsan_begin_atomic(true);
 	s->sequence++;
 	smp_wmb();
 }
@@ -233,6 +254,7 @@  static inline void raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
+	kcsan_end_atomic(true);
 }
 
 /**
@@ -262,18 +284,20 @@  static inline void raw_write_seqcount_end(seqcount_t *s)
  *
  *      void write(void)
  *      {
- *              Y = true;
+ *              WRITE_ONCE(Y, true);
  *
  *              raw_write_seqcount_barrier(seq);
  *
- *              X = false;
+ *              WRITE_ONCE(X, false);
  *      }
  */
 static inline void raw_write_seqcount_barrier(seqcount_t *s)
 {
+	kcsan_begin_atomic(true);
 	s->sequence++;
 	smp_wmb();
 	s->sequence++;
+	kcsan_end_atomic(true);
 }
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
@@ -398,7 +422,9 @@  static inline void write_seqcount_end(seqcount_t *s)
 static inline void write_seqcount_invalidate(seqcount_t *s)
 {
 	smp_wmb();
+	kcsan_begin_atomic(true);
 	s->sequence+=2;
+	kcsan_end_atomic(true);
 }
 
 typedef struct {
@@ -430,11 +456,21 @@  typedef struct {
  */
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
-	return read_seqcount_begin(&sl->seqcount);
+	unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry */
+	kcsan_begin_atomic(false);
+	return ret;
 }
 
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
+	/*
+	 * Assume not nested: read_seqretry may be called multiple times when
+	 * completing read critical section.
+	 */
+	kcsan_end_atomic(false);
+
 	return read_seqcount_retry(&sl->seqcount, start);
 }