[v3,2/3] ring_buffer: Change some static functions to bool

Message ID 20230303151706.57851-3-ubizjak@gmail.com
State Superseded
Series Improve trace/ring_buffer.c

Commit Message

Uros Bizjak March 3, 2023, 3:17 p.m. UTC
The return values of some functions are of boolean type. Change the
type of these functions to bool and adjust their return values. Also
change the type of some internal variables to bool.

No functional change intended.

Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
v3: Rearrange variable declarations.
---
 kernel/trace/ring_buffer.c | 47 ++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 25 deletions(-)
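
The shape of the conversion is the same throughout. A minimal standalone
sketch of the pattern (some_flag_set() and FLAG_MASK are made-up names
for illustration, not code from the patch):

	#include <stdbool.h>

	#define FLAG_MASK 0x1UL	/* illustrative mask */

	/* Before: int used as a boolean, returning 0 or 1. */
	static inline int some_flag_set_old(unsigned long val)
	{
		if (val & FLAG_MASK)
			return 1;
		return 0;
	}

	/*
	 * After: the return type states the intent; !! collapses any
	 * non-zero mask result to true. With a bool return type the !!
	 * is strictly redundant in C (conversion to bool already yields
	 * 0 or 1), but it is a common kernel idiom for mask expressions.
	 */
	static inline bool some_flag_set(unsigned long val)
	{
		return !!(val & FLAG_MASK);
	}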

Comments

Mukesh Ojha March 3, 2023, 4:34 p.m. UTC | #1
On 3/3/2023 8:47 PM, Uros Bizjak wrote:
> The return values of some functions are of boolean type. Change the
> type of these functions to bool and adjust their return values. Also
> change the type of some internal variables to bool.
> 
> No functional change intended.
> 
> Cc: Steven Rostedt <rostedt@goodmis.org>
> Cc: Masami Hiramatsu <mhiramat@kernel.org>
> Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> ---
> v3: Rearrange variable declarations.
> ---
>   kernel/trace/ring_buffer.c | 47 ++++++++++++++++++--------------------
>   1 file changed, 22 insertions(+), 25 deletions(-)
> 
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index 05fdc92554df..71df857242b4 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -163,7 +163,7 @@ enum {
>   #define extended_time(event) \
>   	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
>   
> -static inline int rb_null_event(struct ring_buffer_event *event)
> +static inline bool rb_null_event(struct ring_buffer_event *event)
>   {
>   	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
>   }
> @@ -367,11 +367,9 @@ static void free_buffer_page(struct buffer_page *bpage)
>   /*
>    * We need to fit the time_stamp delta into 27 bits.
>    */
> -static inline int test_time_stamp(u64 delta)
> +static inline bool test_time_stamp(u64 delta)
>   {
> -	if (delta & TS_DELTA_TEST)
> -		return 1;
> -	return 0;
> +	return !!(delta & TS_DELTA_TEST);
>   }
>   
>   #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
> @@ -700,7 +698,7 @@ rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
>   	return ret == expect;
>   }
>   
> -static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
> +static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
>   {
>   	unsigned long cnt, top, bottom, msb;
>   	unsigned long cnt2, top2, bottom2, msb2;
> @@ -1490,7 +1488,7 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
>   	return NULL;
>   }
>   
> -static int rb_head_page_replace(struct buffer_page *old,
> +static bool rb_head_page_replace(struct buffer_page *old,
>   				struct buffer_page *new)

nit: did you miss ret in rb_get_reader_page()?

>   {
>   	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
> @@ -1917,7 +1915,7 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
>   	return local_read(&bpage->write) & RB_WRITE_MASK;
>   }
>   
> -static int
> +static bool
>   rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
>   {
>   	struct list_head *tail_page, *to_remove, *next_page;
> @@ -2030,12 +2028,13 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
>   	return nr_removed == 0;
>   }
>   
> -static int
> +static bool
>   rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
>   {
>   	struct list_head *pages = &cpu_buffer->new_pages;
> -	int retries, success;
>   	unsigned long flags;
> +	bool success;
> +	int retries;
>   
>   	/* Can be called at early boot up, where interrupts must not been enabled */
>   	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
> @@ -2054,7 +2053,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
>   	 * spinning.
>   	 */
>   	retries = 10;
> -	success = 0;
> +	success = false;
>   	while (retries--) {
>   		struct list_head *head_page, *prev_page, *r;
>   		struct list_head *last_page, *first_page;
> @@ -2083,7 +2082,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
>   			 * pointer to point to end of list
>   			 */
>   			head_page->prev = last_page;
> -			success = 1;
> +			success = true;
>   			break;
>   		}
>   	}
> @@ -2111,7 +2110,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
>   
>   static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
>   {
> -	int success;
> +	bool success;
>   
>   	if (cpu_buffer->nr_pages_to_update > 0)
>   		success = rb_insert_pages(cpu_buffer);
> @@ -2994,7 +2993,7 @@ static u64 rb_time_delta(struct ring_buffer_event *event)
>   	}
>   }
>   
> -static inline int
> +static inline bool
>   rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
>   		  struct ring_buffer_event *event)
>   {
> @@ -3015,7 +3014,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
>   	delta = rb_time_delta(event);
>   
>   	if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
> -		return 0;
> +		return false;
>   
>   	/* Make sure the write stamp is read before testing the location */
>   	barrier();
> @@ -3028,7 +3027,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
>   		/* Something came in, can't discard */
>   		if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
>   				       write_stamp, write_stamp - delta))
> -			return 0;
> +			return false;
>   
>   		/*
>   		 * It's possible that the event time delta is zero
> @@ -3061,12 +3060,12 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
>   		if (index == old_index) {
>   			/* update counters */
>   			local_sub(event_length, &cpu_buffer->entries_bytes);
> -			return 1;
> +			return true;
>   		}
>   	}
>   
>   	/* could not discard */
> -	return 0;
> +	return false;
>   }
>   
>   static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
> @@ -3281,7 +3280,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
>    * Note: The TRANSITION bit only handles a single transition between context.
>    */
>   
> -static __always_inline int
> +static __always_inline bool
>   trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
>   {
>   	unsigned int val = cpu_buffer->current_context;
> @@ -3298,14 +3297,14 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
>   		bit = RB_CTX_TRANSITION;
>   		if (val & (1 << (bit + cpu_buffer->nest))) {
>   			do_ring_buffer_record_recursion();
> -			return 1;
> +			return true;
>   		}
>   	}
>   
>   	val |= (1 << (bit + cpu_buffer->nest));
>   	cpu_buffer->current_context = val;
>   
> -	return 0;
> +	return false;
>   }
>   
>   static __always_inline void
> @@ -5408,9 +5407,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
>   {
>   	struct ring_buffer_per_cpu *cpu_buffer;
>   	unsigned long flags;
> -	bool dolock;
> +	bool dolock, ret;
>   	int cpu;
> -	int ret;

nit: it would have been nice not to change the existing layout,
i.e. to keep each variable on a separate line.

>   
>   	/* yes this is racy, but if you don't like the race, lock the buffer */
>   	for_each_buffer_cpu(buffer, cpu) {
> @@ -5438,8 +5436,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
>   {
>   	struct ring_buffer_per_cpu *cpu_buffer;
>   	unsigned long flags;
> -	bool dolock;
> -	int ret;
> +	bool dolock, ret;

same here

>   
>   	if (!cpumask_test_cpu(cpu, buffer->cpumask))
>   		return true;


Feel free to add

Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>

-Mukesh
Steven Rostedt March 3, 2023, 5:07 p.m. UTC | #2
On Fri, 3 Mar 2023 22:04:01 +0530
Mukesh Ojha <quic_mojha@quicinc.com> wrote:

> >   static __always_inline void
> > @@ -5408,9 +5407,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
> >   {
> >   	struct ring_buffer_per_cpu *cpu_buffer;
> >   	unsigned long flags;
> > -	bool dolock;
> > +	bool dolock, ret;
> >   	int cpu;
> > -	int ret;  
> 
> nit: it would have been nice not to change the existing layout,
> i.e. to keep each variable on a separate line.

I agree. I prefer each variable on a separate line, as it makes fixing
"unused variable" warnings easier.

-- Steve


> 
> >   
> >   	/* yes this is racy, but if you don't like the race, lock the buffer */
> >   	for_each_buffer_cpu(buffer, cpu) {
> > @@ -5438,8 +5436,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
> >   {
> >   	struct ring_buffer_per_cpu *cpu_buffer;
> >   	unsigned long flags;
> > -	bool dolock;
> > -	int ret;
> > +	bool dolock, ret;  
> 
> same here
Uros Bizjak March 4, 2023, 8:56 a.m. UTC | #3
On Fri, Mar 3, 2023 at 5:34 PM Mukesh Ojha <quic_mojha@quicinc.com> wrote:

> > -static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
> > +static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
> >   {
> >       unsigned long cnt, top, bottom, msb;
> >       unsigned long cnt2, top2, bottom2, msb2;
> > @@ -1490,7 +1488,7 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
> >       return NULL;
> >   }
> >
> > -static int rb_head_page_replace(struct buffer_page *old,
> > +static bool rb_head_page_replace(struct buffer_page *old,
> >                               struct buffer_page *new)
>
> nit: did you miss ret in rb_get_reader_page()?

Indeed, thanks for noticing!
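
(In rb_get_reader_page() the local `ret` holds the result of
rb_head_page_replace(), so the follow-up is presumably the same
one-line type change; a sketch, not the actual v4 hunk:

	-	int ret;
	+	bool ret;
)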

> >   static __always_inline void
> > @@ -5408,9 +5407,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
> >   {
> >       struct ring_buffer_per_cpu *cpu_buffer;
> >       unsigned long flags;
> > -     bool dolock;
> > +     bool dolock, ret;
> >       int cpu;
> > -     int ret;
>
> nit: it would have been nice not to change the existing layout,
> i.e. to keep each variable on a separate line.
> >
> >       /* yes this is racy, but if you don't like the race, lock the buffer */
> >       for_each_buffer_cpu(buffer, cpu) {
> > @@ -5438,8 +5436,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
> >   {
> >       struct ring_buffer_per_cpu *cpu_buffer;
> >       unsigned long flags;
> > -     bool dolock;
> > -     int ret;
> > +     bool dolock, ret;
>
> same here

I'll do it the way you propose in the follow-up revision.

> >
> >       if (!cpumask_test_cpu(cpu, buffer->cpumask))
> >               return true;
>
>
> Feel free to add
>
> Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>

Thanks.

Uros.

Patch

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05fdc92554df..71df857242b4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -163,7 +163,7 @@  enum {
 #define extended_time(event) \
 	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 
-static inline int rb_null_event(struct ring_buffer_event *event)
+static inline bool rb_null_event(struct ring_buffer_event *event)
 {
 	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 }
@@ -367,11 +367,9 @@  static void free_buffer_page(struct buffer_page *bpage)
 /*
  * We need to fit the time_stamp delta into 27 bits.
  */
-static inline int test_time_stamp(u64 delta)
+static inline bool test_time_stamp(u64 delta)
 {
-	if (delta & TS_DELTA_TEST)
-		return 1;
-	return 0;
+	return !!(delta & TS_DELTA_TEST);
 }
 
 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
@@ -700,7 +698,7 @@  rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
 	return ret == expect;
 }
 
-static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
 {
 	unsigned long cnt, top, bottom, msb;
 	unsigned long cnt2, top2, bottom2, msb2;
@@ -1490,7 +1488,7 @@  rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 	return NULL;
 }
 
-static int rb_head_page_replace(struct buffer_page *old,
+static bool rb_head_page_replace(struct buffer_page *old,
 				struct buffer_page *new)
 {
 	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
@@ -1917,7 +1915,7 @@  static inline unsigned long rb_page_write(struct buffer_page *bpage)
 	return local_read(&bpage->write) & RB_WRITE_MASK;
 }
 
-static int
+static bool
 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 {
 	struct list_head *tail_page, *to_remove, *next_page;
@@ -2030,12 +2028,13 @@  rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 	return nr_removed == 0;
 }
 
-static int
+static bool
 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct list_head *pages = &cpu_buffer->new_pages;
-	int retries, success;
 	unsigned long flags;
+	bool success;
+	int retries;
 
 	/* Can be called at early boot up, where interrupts must not been enabled */
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2054,7 +2053,7 @@  rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 	 * spinning.
 	 */
 	retries = 10;
-	success = 0;
+	success = false;
 	while (retries--) {
 		struct list_head *head_page, *prev_page, *r;
 		struct list_head *last_page, *first_page;
@@ -2083,7 +2082,7 @@  rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 			 * pointer to point to end of list
 			 */
 			head_page->prev = last_page;
-			success = 1;
+			success = true;
 			break;
 		}
 	}
@@ -2111,7 +2110,7 @@  rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	int success;
+	bool success;
 
 	if (cpu_buffer->nr_pages_to_update > 0)
 		success = rb_insert_pages(cpu_buffer);
@@ -2994,7 +2993,7 @@  static u64 rb_time_delta(struct ring_buffer_event *event)
 	}
 }
 
-static inline int
+static inline bool
 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		  struct ring_buffer_event *event)
 {
@@ -3015,7 +3014,7 @@  rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	delta = rb_time_delta(event);
 
 	if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
-		return 0;
+		return false;
 
 	/* Make sure the write stamp is read before testing the location */
 	barrier();
@@ -3028,7 +3027,7 @@  rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		/* Something came in, can't discard */
 		if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
 				       write_stamp, write_stamp - delta))
-			return 0;
+			return false;
 
 		/*
 		 * It's possible that the event time delta is zero
@@ -3061,12 +3060,12 @@  rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		if (index == old_index) {
 			/* update counters */
 			local_sub(event_length, &cpu_buffer->entries_bytes);
-			return 1;
+			return true;
 		}
 	}
 
 	/* could not discard */
-	return 0;
+	return false;
 }
 
 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
@@ -3281,7 +3280,7 @@  rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * Note: The TRANSITION bit only handles a single transition between context.
  */
 
-static __always_inline int
+static __always_inline bool
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned int val = cpu_buffer->current_context;
@@ -3298,14 +3297,14 @@  trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 		bit = RB_CTX_TRANSITION;
 		if (val & (1 << (bit + cpu_buffer->nest))) {
 			do_ring_buffer_record_recursion();
-			return 1;
+			return true;
 		}
 	}
 
 	val |= (1 << (bit + cpu_buffer->nest));
 	cpu_buffer->current_context = val;
 
-	return 0;
+	return false;
 }
 
 static __always_inline void
@@ -5408,9 +5407,8 @@  bool ring_buffer_empty(struct trace_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
-	bool dolock;
+	bool dolock, ret;
 	int cpu;
-	int ret;
 
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
@@ -5438,8 +5436,7 @@  bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
-	bool dolock;
-	int ret;
+	bool dolock, ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return true;