diff mbox series

[v2,1/3] blktrace: introduce 'blk_trace_{start,stop}' helper

Message ID 20221018145135.932240-2-yebin@huaweicloud.com (mailing list archive)
State New, archived
Headers show
Series fix possible memleak in '__blk_trace_remove' | expand

Commit Message

Ye Bin Oct. 18, 2022, 2:51 p.m. UTC
Introduce 'blk_trace_{start,stop}' helper. No functional change.

Signed-off-by: Ye Bin <yebin@huaweicloud.com>
---
 kernel/trace/blktrace.c | 82 ++++++++++++++++++++++-------------------
 1 file changed, 44 insertions(+), 38 deletions(-)

Comments

Christoph Hellwig Oct. 18, 2022, 3:52 p.m. UTC | #1
> +static int blk_trace_start(struct blk_trace *bt)
> +{
> +	/*
> +	 * For starting a trace, we can transition from a setup or stopped
> +	 * trace.
> +	 */

That's pretty obvious from the check, isn't it?

> +	if (bt->trace_state == Blktrace_setup ||
> +	    bt->trace_state == Blktrace_stopped) {

I'd invert the check and return early from the function for the error
case so that the real starting code is in the main path.

> +static int blk_trace_stop(struct blk_trace *bt)
> +{
> +	/*
> +	 * For stopping a trace, the state must be running
> +	 */
> +	if (bt->trace_state == Blktrace_running) {

The same two comments apply here as well.
diff mbox series

Patch

diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7f5eb295fe19..f07a03c1e052 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -346,6 +346,45 @@  static void put_probe_ref(void)
 	mutex_unlock(&blk_probe_mutex);
 }
 
+static int blk_trace_start(struct blk_trace *bt)
+{
+	/*
+	 * For starting a trace, we can transition from a setup or stopped
+	 * trace.
+	 */
+	if (bt->trace_state == Blktrace_setup ||
+	    bt->trace_state == Blktrace_stopped) {
+		blktrace_seq++;
+		smp_mb();
+		bt->trace_state = Blktrace_running;
+		raw_spin_lock_irq(&running_trace_lock);
+		list_add(&bt->running_list, &running_trace_list);
+		raw_spin_unlock_irq(&running_trace_lock);
+
+		trace_note_time(bt);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int blk_trace_stop(struct blk_trace *bt)
+{
+	/*
+	 * For stopping a trace, the state must be running
+	 */
+	if (bt->trace_state == Blktrace_running) {
+		bt->trace_state = Blktrace_stopped;
+		raw_spin_lock_irq(&running_trace_lock);
+		list_del_init(&bt->running_list);
+		raw_spin_unlock_irq(&running_trace_lock);
+		relay_flush(bt->rchan);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
 {
 	synchronize_rcu();
@@ -658,7 +697,6 @@  static int compat_blk_trace_setup(struct request_queue *q, char *name,
 
 static int __blk_trace_startstop(struct request_queue *q, int start)
 {
-	int ret;
 	struct blk_trace *bt;
 
 	bt = rcu_dereference_protected(q->blk_trace,
@@ -666,36 +704,10 @@  static int __blk_trace_startstop(struct request_queue *q, int start)
 	if (bt == NULL)
 		return -EINVAL;
 
-	/*
-	 * For starting a trace, we can transition from a setup or stopped
-	 * trace. For stopping a trace, the state must be running
-	 */
-	ret = -EINVAL;
-	if (start) {
-		if (bt->trace_state == Blktrace_setup ||
-		    bt->trace_state == Blktrace_stopped) {
-			blktrace_seq++;
-			smp_mb();
-			bt->trace_state = Blktrace_running;
-			raw_spin_lock_irq(&running_trace_lock);
-			list_add(&bt->running_list, &running_trace_list);
-			raw_spin_unlock_irq(&running_trace_lock);
-
-			trace_note_time(bt);
-			ret = 0;
-		}
-	} else {
-		if (bt->trace_state == Blktrace_running) {
-			bt->trace_state = Blktrace_stopped;
-			raw_spin_lock_irq(&running_trace_lock);
-			list_del_init(&bt->running_list);
-			raw_spin_unlock_irq(&running_trace_lock);
-			relay_flush(bt->rchan);
-			ret = 0;
-		}
-	}
-
-	return ret;
+	if (start)
+		return blk_trace_start(bt);
+	else
+		return blk_trace_stop(bt);
 }
 
 int blk_trace_startstop(struct request_queue *q, int start)
@@ -1614,13 +1626,7 @@  static int blk_trace_remove_queue(struct request_queue *q)
 	if (bt == NULL)
 		return -EINVAL;
 
-	if (bt->trace_state == Blktrace_running) {
-		bt->trace_state = Blktrace_stopped;
-		raw_spin_lock_irq(&running_trace_lock);
-		list_del_init(&bt->running_list);
-		raw_spin_unlock_irq(&running_trace_lock);
-		relay_flush(bt->rchan);
-	}
+	blk_trace_stop(bt);
 
 	put_probe_ref();
 	synchronize_rcu();