[06/15] stackdepot: fix and clean-up atomic annotations

Message ID 8ad8f778b43dab49e4e6214b8d90bed31b75436f.1693328501.git.andreyknvl@google.com (mailing list archive)
State New
Series: stackdepot: allow evicting stack traces

Commit Message

andrey.konovalov@linux.dev Aug. 29, 2023, 5:11 p.m. UTC
From: Andrey Konovalov <andreyknvl@google.com>

Simplify comments accompanying the use of atomic accesses in the
stack depot code.

Also turn the smp_load_acquire of next_pool_required in depot_init_pool
into READ_ONCE, as both depot_init_pool and all of the smp_store_release's
to this variable are executed under the stack depot lock.
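
A minimal sketch of the pattern described above (illustrative only and
heavily simplified from lib/stackdepot.c as shown in the diff below; the
lock name and the initial value are assumptions):

static int next_pool_required = 1;
static DEFINE_RAW_SPINLOCK(pool_lock);

static void depot_init_pool(void **prealloc)
{
	/*
	 * Runs with pool_lock held. Every write to next_pool_required
	 * also happens under pool_lock, so no acquire ordering is
	 * needed here; READ_ONCE is kept purely as an annotation (see
	 * the discussion below).
	 */
	if (!READ_ONCE(next_pool_required))
		return;

	/* ... install *prealloc as the next stack pool ... */

	/* Pairs with smp_load_acquire() in stack_depot_save(). */
	smp_store_release(&next_pool_required, 0);
}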

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>

---

This patch is not strictly required, as the atomic accesses are fully
removed in one of the later patches. However, I decided to keep the
patch just in case we end up needing these atomics in the following
iterations of this series.
---
 lib/stackdepot.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

Comments

Marco Elver Aug. 30, 2023, 8:34 a.m. UTC | #1
On Tue, Aug 29, 2023 at 07:11PM +0200, andrey.konovalov@linux.dev wrote:
> From: Andrey Konovalov <andreyknvl@google.com>
> 
> Simplify comments accompanying the use of atomic accesses in the
> stack depot code.
> 
> Also turn the smp_load_acquire of next_pool_required in depot_init_pool
> into READ_ONCE, as both depot_init_pool and all of the smp_store_release's
> to this variable are executed under the stack depot lock.
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> 
> ---
> 
> This patch is not strictly required, as the atomic accesses are fully
> removed in one of the later patches. However, I decided to keep the
> patch just in case we end up needing these atomics in the following
> iterations of this series.
> ---
>  lib/stackdepot.c | 27 +++++++++++++--------------
>  1 file changed, 13 insertions(+), 14 deletions(-)
> 
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index 93191ee70fc3..9ae71e1ef1a7 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -226,10 +226,10 @@ static void depot_init_pool(void **prealloc)
>  	/*
>  	 * If the next pool is already initialized or the maximum number of
>  	 * pools is reached, do not use the preallocated memory.
> -	 * smp_load_acquire() here pairs with smp_store_release() below and
> -	 * in depot_alloc_stack().
> +	 * READ_ONCE is only used to mark the variable as atomic,
> +	 * there are no concurrent writes.

This doesn't make sense. If there are no concurrent writes, we should
drop the marking, so that tools like KCSAN can tell us about any
concurrent writes if that assumption turns out to be wrong.
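
Roughly, the suggested alternative (an illustrative sketch, not taken from
the patch, assuming every writer of next_pool_required really does hold the
stack depot lock):

	/*
	 * Runs under the depot lock, and all writers of next_pool_required
	 * hold the same lock, so a plain read suffices. If that assumption
	 * is ever broken, KCSAN can report the data race instead of
	 * READ_ONCE() silently allowing it.
	 */
	if (!next_pool_required)
		return;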

>  	 */
> -	if (!smp_load_acquire(&next_pool_required))
> +	if (!READ_ONCE(next_pool_required))
>  		return;
>  
>  	/* Check if the current pool is not yet allocated. */
> @@ -250,8 +250,8 @@ static void depot_init_pool(void **prealloc)
>  		 * At this point, either the next pool is initialized or the
>  		 * maximum number of pools is reached. In either case, take
>  		 * note that initializing another pool is not required.
> -		 * This smp_store_release pairs with smp_load_acquire() above
> -		 * and in stack_depot_save().
> +		 * smp_store_release pairs with smp_load_acquire in
> +		 * stack_depot_save.
>  		 */
>  		smp_store_release(&next_pool_required, 0);
>  	}
> @@ -275,15 +275,15 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
>  		/*
>  		 * Move on to the next pool.
>  		 * WRITE_ONCE pairs with potential concurrent read in
> -		 * stack_depot_fetch().
> +		 * stack_depot_fetch.
>  		 */
>  		WRITE_ONCE(pool_index, pool_index + 1);
>  		pool_offset = 0;
>  		/*
>  		 * If the maximum number of pools is not reached, take note
>  		 * that the next pool needs to initialized.
> -		 * smp_store_release() here pairs with smp_load_acquire() in
> -		 * stack_depot_save() and depot_init_pool().
> +		 * smp_store_release pairs with smp_load_acquire in
> +		 * stack_depot_save.
>  		 */
>  		if (pool_index + 1 < DEPOT_MAX_POOLS)
>  			smp_store_release(&next_pool_required, 1);
> @@ -414,8 +414,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>  
>  	/*
>  	 * Fast path: look the stack trace up without locking.
> -	 * The smp_load_acquire() here pairs with smp_store_release() to
> -	 * |bucket| below.
> +	 * smp_load_acquire pairs with smp_store_release to |bucket| below.
>  	 */
>  	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
>  	if (found)
> @@ -425,8 +424,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>  	 * Check if another stack pool needs to be initialized. If so, allocate
>  	 * the memory now - we won't be able to do that under the lock.
>  	 *
> -	 * The smp_load_acquire() here pairs with smp_store_release() to
> -	 * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
> +	 * smp_load_acquire pairs with smp_store_release
> +	 * in depot_alloc_stack and depot_init_pool.

Reflow comment to match 80 cols used by comments elsewhere.

>  	 */
>  	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
>  		/*
> @@ -452,8 +451,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>  		if (new) {
>  			new->next = *bucket;
>  			/*
> -			 * This smp_store_release() pairs with
> -			 * smp_load_acquire() from |bucket| above.
> +			 * smp_store_release pairs with smp_load_acquire
> +			 * from |bucket| above.
>  			 */
>  			smp_store_release(bucket, new);
>  			found = new;
> -- 
> 2.25.1
>
Andrey Konovalov Sept. 4, 2023, 6:45 p.m. UTC | #2
On Wed, Aug 30, 2023 at 10:34 AM Marco Elver <elver@google.com> wrote:
>
> > diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> > index 93191ee70fc3..9ae71e1ef1a7 100644
> > --- a/lib/stackdepot.c
> > +++ b/lib/stackdepot.c
> > @@ -226,10 +226,10 @@ static void depot_init_pool(void **prealloc)
> >       /*
> >        * If the next pool is already initialized or the maximum number of
> >        * pools is reached, do not use the preallocated memory.
> > -      * smp_load_acquire() here pairs with smp_store_release() below and
> > -      * in depot_alloc_stack().
> > +      * READ_ONCE is only used to mark the variable as atomic,
> > +      * there are no concurrent writes.
>
> This doesn't make sense. If there are no concurrent writes, we should
> drop the marking, so that tools like KCSAN can tell us about any
> concurrent writes if that assumption turns out to be wrong.

Makes sense, will do in v2.

> > @@ -425,8 +424,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
> >        * Check if another stack pool needs to be initialized. If so, allocate
> >        * the memory now - we won't be able to do that under the lock.
> >        *
> > -      * The smp_load_acquire() here pairs with smp_store_release() to
> > -      * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
> > +      * smp_load_acquire pairs with smp_store_release
> > +      * in depot_alloc_stack and depot_init_pool.
>
> Reflow comment to match 80 cols used by comments elsewhere.

Will do in v2.

Thanks!

Patch

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 93191ee70fc3..9ae71e1ef1a7 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -226,10 +226,10 @@  static void depot_init_pool(void **prealloc)
 	/*
 	 * If the next pool is already initialized or the maximum number of
 	 * pools is reached, do not use the preallocated memory.
-	 * smp_load_acquire() here pairs with smp_store_release() below and
-	 * in depot_alloc_stack().
+	 * READ_ONCE is only used to mark the variable as atomic,
+	 * there are no concurrent writes.
 	 */
-	if (!smp_load_acquire(&next_pool_required))
+	if (!READ_ONCE(next_pool_required))
 		return;
 
 	/* Check if the current pool is not yet allocated. */
@@ -250,8 +250,8 @@  static void depot_init_pool(void **prealloc)
 		 * At this point, either the next pool is initialized or the
 		 * maximum number of pools is reached. In either case, take
 		 * note that initializing another pool is not required.
-		 * This smp_store_release pairs with smp_load_acquire() above
-		 * and in stack_depot_save().
+		 * smp_store_release pairs with smp_load_acquire in
+		 * stack_depot_save.
 		 */
 		smp_store_release(&next_pool_required, 0);
 	}
@@ -275,15 +275,15 @@  depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 		/*
 		 * Move on to the next pool.
 		 * WRITE_ONCE pairs with potential concurrent read in
-		 * stack_depot_fetch().
+		 * stack_depot_fetch.
 		 */
 		WRITE_ONCE(pool_index, pool_index + 1);
 		pool_offset = 0;
 		/*
 		 * If the maximum number of pools is not reached, take note
 		 * that the next pool needs to initialized.
-		 * smp_store_release() here pairs with smp_load_acquire() in
-		 * stack_depot_save() and depot_init_pool().
+		 * smp_store_release pairs with smp_load_acquire in
+		 * stack_depot_save.
 		 */
 		if (pool_index + 1 < DEPOT_MAX_POOLS)
 			smp_store_release(&next_pool_required, 1);
@@ -414,8 +414,7 @@  depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 
 	/*
 	 * Fast path: look the stack trace up without locking.
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |bucket| below.
+	 * smp_load_acquire pairs with smp_store_release to |bucket| below.
 	 */
 	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
 	if (found)
@@ -425,8 +424,8 @@  depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	 * Check if another stack pool needs to be initialized. If so, allocate
 	 * the memory now - we won't be able to do that under the lock.
 	 *
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+	 * smp_load_acquire pairs with smp_store_release
+	 * in depot_alloc_stack and depot_init_pool.
 	 */
 	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
 		/*
@@ -452,8 +451,8 @@  depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 		if (new) {
 			new->next = *bucket;
 			/*
-			 * This smp_store_release() pairs with
-			 * smp_load_acquire() from |bucket| above.
+			 * smp_store_release pairs with smp_load_acquire
+			 * from |bucket| above.
 			 */
 			smp_store_release(bucket, new);
 			found = new;