
[v3,1/3] ring_buffer: Change some static functions to void

Message ID 20230303151706.57851-2-ubizjak@gmail.com
State Superseded
Series Improve trace/ring_buffer.c

Commit Message

Uros Bizjak March 3, 2023, 3:17 p.m. UTC
The results of some static functions are not used. Change the return
type of these functions to void and remove the unnecessary returns.

No functional change intended.

Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
---
 kernel/trace/ring_buffer.c | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)
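
[Editor's note] For context on why dropping these return values is safe:
RB_WARN_ON does its real work as a side effect and only reports the tested
condition back to the caller. A simplified sketch of the macro, abridged
from the same kernel/trace/ring_buffer.c (the real definition is more
involved; it also accepts a plain struct trace_buffer):

#define RB_WARN_ON(cpu_buffer, cond)					\
	({								\
		int _ret = unlikely(cond);				\
		if (_ret) {						\
			/* stop recording so the damage stays inspectable */ \
			atomic_inc(&(cpu_buffer)->buffer->record_disabled); \
			WARN_ON(1);					\
		}							\
		_ret;	/* callers may check this; these callers did not */ \
	})

Every function touched by this patch ignored that final value, so the
conversion to void changes nothing observable.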

Comments

Mukesh Ojha March 3, 2023, 4:16 p.m. UTC | #1
On 3/3/2023 8:47 PM, Uros Bizjak wrote:
> The results of some static functions are not used. Change the return
> type of these functions to void and remove the unnecessary returns.
> 
> No functional change intended.
> 
> Cc: Steven Rostedt <rostedt@goodmis.org>
> Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
> ---
>   kernel/trace/ring_buffer.c | 22 +++++++---------------
>   1 file changed, 7 insertions(+), 15 deletions(-)
> 
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index af50d931b020..05fdc92554df 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -1569,15 +1569,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
>   	}
>   }
>   
> -static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
> +static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
>   			  struct buffer_page *bpage)
>   {
>   	unsigned long val = (unsigned long)bpage;
>   
> -	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
> -		return 1;
> -
> -	return 0;
> +	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
>   }
>   
>   /**
> @@ -1587,30 +1584,28 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
>    * As a safety measure we check to make sure the data pages have not
>    * been corrupted.
>    */
> -static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
> +static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
>   {
>   	struct list_head *head = rb_list_head(cpu_buffer->pages);
>   	struct list_head *tmp;
>   
>   	if (RB_WARN_ON(cpu_buffer,
>   			rb_list_head(rb_list_head(head->next)->prev) != head))
> -		return -1;
> +		return;
>   
>   	if (RB_WARN_ON(cpu_buffer,
>   			rb_list_head(rb_list_head(head->prev)->next) != head))
> -		return -1;
> +		return;
>   
>   	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
>   		if (RB_WARN_ON(cpu_buffer,
>   				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
> -			return -1;
> +			return;
>   
>   		if (RB_WARN_ON(cpu_buffer,
>   				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
> -			return -1;
> +			return;
>   	}
> -
> -	return 0;
>   }
>   
>   static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
> @@ -4500,7 +4495,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
>   	default:
>   		RB_WARN_ON(cpu_buffer, 1);
>   	}
> -	return;
>   }
>   
>   static void
> @@ -4531,7 +4525,6 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
>   	default:
>   		RB_WARN_ON(iter->cpu_buffer, 1);
>   	}
> -	return;
>   }
>   
>   static struct buffer_page *
> @@ -4946,7 +4939,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
>   {
>   	if (likely(locked))
>   		raw_spin_unlock(&cpu_buffer->reader_lock);
> -	return;
>   }
>   

Nice cleanup, thanks.

Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>

-Mukesh

Patch

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index af50d931b020..05fdc92554df 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1569,15 +1569,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 	}
 }
 
-static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
 			  struct buffer_page *bpage)
 {
 	unsigned long val = (unsigned long)bpage;
 
-	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
-		return 1;
-
-	return 0;
+	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
 }
 
 /**
@@ -1587,30 +1584,28 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
  * As a safety measure we check to make sure the data pages have not
  * been corrupted.
  */
-static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct list_head *head = rb_list_head(cpu_buffer->pages);
 	struct list_head *tmp;
 
 	if (RB_WARN_ON(cpu_buffer,
 			rb_list_head(rb_list_head(head->next)->prev) != head))
-		return -1;
+		return;
 
 	if (RB_WARN_ON(cpu_buffer,
 			rb_list_head(rb_list_head(head->prev)->next) != head))
-		return -1;
+		return;
 
 	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
 		if (RB_WARN_ON(cpu_buffer,
 				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
-			return -1;
+			return;
 
 		if (RB_WARN_ON(cpu_buffer,
 				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
-			return -1;
+			return;
 	}
-
-	return 0;
 }
 
 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
@@ -4500,7 +4495,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 	default:
 		RB_WARN_ON(cpu_buffer, 1);
 	}
-	return;
 }
 
 static void
@@ -4531,7 +4525,6 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
 	default:
 		RB_WARN_ON(iter->cpu_buffer, 1);
 	}
-	return;
 }
 
 static struct buffer_page *
@@ -4946,7 +4939,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
 {
 	if (likely(locked))
 		raw_spin_unlock(&cpu_buffer->reader_lock);
-	return;
 }
 
 /**
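
[Editor's note] For readers outside the kernel tree, here is the same
refactoring pattern as a self-contained, hypothetical userspace sketch
(warn_on() is a stand-in for RB_WARN_ON, not a real kernel API):

#include <stdio.h>

/* Stand-in for RB_WARN_ON: the warning is the side effect; the
 * return value only reports whether the condition fired. */
static int warn_on(int cond)
{
	if (cond)
		fprintf(stderr, "warning: consistency check failed\n");
	return cond;
}

/* Before: a status is returned, but no caller ever inspects it. */
static int check_flags_old(unsigned long val)
{
	if (warn_on(val & 0x3UL))
		return 1;
	return 0;
}

/* After: identical observable behavior, simpler contract. */
static void check_flags_new(unsigned long val)
{
	warn_on(val & 0x3UL);
}

int main(void)
{
	check_flags_old(0x4UL);	/* 0x4 & 0x3 == 0: silent; result discarded */
	check_flags_new(0x5UL);	/* 0x5 & 0x3 != 0: prints the warning */
	return 0;
}

As in the patch, the compiler can discard the dead return paths, and the
void signature documents that the check is purely a diagnostic.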