[v3,35/57] perf: Simplify *perf_event_read*()

Message ID: 20230612093540.037803940@infradead.org
State: New, archived
Series: Scope-based Resource Management

Commit Message

Peter Zijlstra, June 12, 2023, 9:07 a.m. UTC
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/events/core.c |   54 ++++++++++++++++-----------------------------------
 1 file changed, 17 insertions(+), 37 deletions(-)
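
The patch converts __perf_event_read(), perf_event_read_local() and
perf_event_read() from open-coded lock/unlock pairs with goto-based exit
paths to the scope-based guard() helpers introduced earlier in this series
(include/linux/cleanup.h). Those helpers build on the compiler's
__attribute__((cleanup)), so the unlock runs automatically on every exit
from the enclosing scope. A minimal userspace sketch of the pattern, with
illustrative names that are not the kernel's:

/*
 * Minimal sketch of the scope-based guard pattern; the kernel's real
 * machinery lives in include/linux/cleanup.h and uses different names.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup hook: the compiler calls this when the variable declared
 * below goes out of scope, on *every* exit path. */
static void guard_mutex_exit(pthread_mutex_t **g)
{
	pthread_mutex_unlock(*g);
}

/* Take the lock now, and schedule the unlock for scope exit. */
#define guard_mutex(l) \
	pthread_mutex_t *__guard __attribute__((cleanup(guard_mutex_exit))) = \
		(pthread_mutex_lock(l), (l))

static int critical(int value)
{
	guard_mutex(&lock);

	if (value < 0)
		return -1;	/* lock dropped here, no 'goto out' needed */

	printf("value = %d\n", value);
	return 0;		/* ... and here */
}

int main(void)
{
	critical(1);
	critical(-1);
	return 0;
}

With this shape, each early return in the patched functions implicitly
drops the lock (or restores IRQ/preempt state), which is what lets the
unlock: and out: labels below disappear.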

Patch

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4435,7 +4435,8 @@  static void __perf_event_read(void *info
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	raw_spin_lock(&ctx->lock);
+	guard(raw_spinlock)(&ctx->lock);
+
 	if (ctx->is_active & EVENT_TIME) {
 		update_context_time(ctx);
 		update_cgrp_time_from_event(event);
@@ -4446,12 +4447,12 @@  static void __perf_event_read(void *info
 		perf_event_update_sibling_time(event);
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
-		goto unlock;
+		return;
 
 	if (!data->group) {
 		pmu->read(event);
 		data->ret = 0;
-		goto unlock;
+		return;
 	}
 
 	pmu->start_txn(pmu, PERF_PMU_TXN_READ);
@@ -4469,9 +4470,6 @@  static void __perf_event_read(void *info
 	}
 
 	data->ret = pmu->commit_txn(pmu);
-
-unlock:
-	raw_spin_unlock(&ctx->lock);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -4502,43 +4500,32 @@  static void calc_timer_values(struct per
 int perf_event_read_local(struct perf_event *event, u64 *value,
 			  u64 *enabled, u64 *running)
 {
-	unsigned long flags;
-	int ret = 0;
-
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
 	 * switches, timer based rotation and IPIs).
 	 */
-	local_irq_save(flags);
+	guard(irqsave)();
 
 	/*
 	 * It must not be an event with inherit set, we cannot read
 	 * all child counters from atomic context.
 	 */
-	if (event->attr.inherit) {
-		ret = -EOPNOTSUPP;
-		goto out;
-	}
+	if (event->attr.inherit)
+		return -EOPNOTSUPP;
 
 	/* If this is a per-task event, it must be for current */
 	if ((event->attach_state & PERF_ATTACH_TASK) &&
-	    event->hw.target != current) {
-		ret = -EINVAL;
-		goto out;
-	}
+	    event->hw.target != current)
+		return -EINVAL;
 
 	/* If this is a per-CPU event, it must be for this CPU */
 	if (!(event->attach_state & PERF_ATTACH_TASK) &&
-	    event->cpu != smp_processor_id()) {
-		ret = -EINVAL;
-		goto out;
-	}
+	    event->cpu != smp_processor_id())
+		return -EINVAL;
 
 	/* If this is a pinned event it must be running on this CPU */
-	if (event->attr.pinned && event->oncpu != smp_processor_id()) {
-		ret = -EBUSY;
-		goto out;
-	}
+	if (event->attr.pinned && event->oncpu != smp_processor_id())
+		return -EBUSY;
 
 	/*
 	 * If the event is currently on this CPU, its either a per-task event,
@@ -4558,10 +4545,8 @@  int perf_event_read_local(struct perf_ev
 		if (running)
 			*running = __running;
 	}
-out:
-	local_irq_restore(flags);
 
-	return ret;
+	return 0;
 }
 
 static int perf_event_read(struct perf_event *event, bool group)
@@ -4595,7 +4580,7 @@  static int perf_event_read(struct perf_e
 			.ret = 0,
 		};
 
-		preempt_disable();
+		guard(preempt)();
 		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
@@ -4609,19 +4594,15 @@  static int perf_event_read(struct perf_e
 		 * after this.
 		 */
 		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
-		preempt_enable();
 		ret = data.ret;
 
 	} else if (state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
-		unsigned long flags;
 
-		raw_spin_lock_irqsave(&ctx->lock, flags);
+		guard(raw_spinlock_irqsave)(&ctx->lock);
 		state = event->state;
-		if (state != PERF_EVENT_STATE_INACTIVE) {
-			raw_spin_unlock_irqrestore(&ctx->lock, flags);
+		if (state != PERF_EVENT_STATE_INACTIVE)
 			goto again;
-		}
 
 		/*
 		 * May read while context is not active (e.g., thread is
@@ -4635,7 +4616,6 @@  static int perf_event_read(struct perf_e
 		perf_event_update_time(event);
 		if (group)
 			perf_event_update_sibling_time(event);
-		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return ret;
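
One subtlety in the final hunk: perf_event_read() still jumps out of the
guarded scope with 'goto again'. That is safe because
__attribute__((cleanup)) also fires when control leaves the scope via
goto, so the guard drops ctx->lock and restores IRQ state before the
jump, replacing the explicit raw_spin_unlock_irqrestore() the old code
needed on that path. A standalone demonstration of that behaviour
(hypothetical names):

#include <stdio.h>

static void scope_exit(int *attempt)
{
	printf("cleanup ran (attempt %d)\n", *attempt);
}

int main(void)
{
	int tries = 0;
again:
	{
		/* stand-in for guard(raw_spinlock_irqsave)(&ctx->lock) */
		int guard __attribute__((cleanup(scope_exit))) = tries;

		if (tries++ < 2)
			goto again;	/* cleanup fires before the jump */
	}
	return 0;
}

This prints the cleanup line three times: once for each goto and once for
the normal fall-through, mirroring how every retry through 'again:'
releases the lock before re-taking it.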