
[v3,39/57] perf: Simplify perf_event_*_userpage()

Message ID: 20230612093540.324593804@infradead.org
State: Handled Elsewhere
Delegated to: Paul Moore
Series: Scope-based Resource Management

Commit Message

Peter Zijlstra June 12, 2023, 9:07 a.m. UTC
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/events/core.c |   30 ++++++++++--------------------
 1 file changed, 10 insertions(+), 20 deletions(-)

Patch

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5971,10 +5971,10 @@ static void perf_event_init_userpage(struct perf_event *event)
 	struct perf_event_mmap_page *userpg;
 	struct perf_buffer *rb;
 
-	rcu_read_lock();
+	guard(rcu)();
 	rb = rcu_dereference(event->rb);
 	if (!rb)
-		goto unlock;
+		return;
 
 	userpg = rb->user_page;
 
@@ -5983,9 +5983,6 @@ static void perf_event_init_userpage(struct perf_event *event)
 	userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
 	userpg->data_offset = PAGE_SIZE;
 	userpg->data_size = perf_data_size(rb);
-
-unlock:
-	rcu_read_unlock();
 }
 
 void __weak arch_perf_update_userpage(struct perf_event *event,
@@ -6004,10 +6001,10 @@ void perf_event_update_userpage(struct perf_event *event)
 	struct perf_buffer *rb;
 	u64 enabled, running, now;
 
-	rcu_read_lock();
+	guard(rcu)();
 	rb = rcu_dereference(event->rb);
 	if (!rb)
-		goto unlock;
+		return;
 
 	/*
 	 * compute total_time_enabled, total_time_running
@@ -6025,7 +6022,7 @@ void perf_event_update_userpage(struct perf_event *event)
 	 * Disable preemption to guarantee consistent time stamps are stored to
 	 * the user page.
 	 */
-	preempt_disable();
+	guard(preempt)();
 	++userpg->lock;
 	barrier();
 	userpg->index = perf_event_index(event);
@@ -6043,9 +6040,6 @@ void perf_event_update_userpage(struct perf_event *event)
 
 	barrier();
 	++userpg->lock;
-	preempt_enable();
-unlock:
-	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
 
@@ -6061,27 +6055,23 @@ static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	rcu_read_lock();
+	guard(rcu)();
 	rb = rcu_dereference(event->rb);
 	if (!rb)
-		goto unlock;
+		return ret;
 
 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
-		goto unlock;
+		return ret;
 
 	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
 	if (!vmf->page)
-		goto unlock;
+		return ret;
 
 	get_page(vmf->page);
 	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
 	vmf->page->index   = vmf->pgoff;
 
-	ret = 0;
-unlock:
-	rcu_read_unlock();
-
-	return ret;
+	return 0;
 }
 
 static void ring_buffer_attach(struct perf_event *event,
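
A note on the pattern, for readers who jumped into the series here:
guard(rcu)() and guard(preempt)() come from the scope-based cleanup
helpers introduced earlier in this series (linux/cleanup.h). They rely on
the compiler's cleanup attribute, which runs a designated function on
every path out of the enclosing scope, so the unlock can never be
forgotten or skipped. The following is a minimal userspace sketch of the
mechanism, assuming only GCC/Clang's __attribute__((cleanup)); the
demo_guard names are invented for illustration, and the kernel's real
DEFINE_GUARD()/DEFINE_LOCK_GUARD_*() macros carry considerably more
machinery.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

struct demo_guard { pthread_mutex_t *lock; };

/* Constructor: take the lock and remember it for the cleanup. */
static inline struct demo_guard demo_guard_init(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (struct demo_guard){ .lock = lock };
}

/* Destructor: the compiler calls this whenever the guard leaves scope. */
static inline void demo_guard_release(struct demo_guard *g)
{
	pthread_mutex_unlock(g->lock);
}

#define guard_demo(lock) \
	struct demo_guard __g __attribute__((cleanup(demo_guard_release))) = \
		demo_guard_init(lock)

static int lookup(int key)
{
	guard_demo(&demo_lock);

	if (key < 0)
		return -1;	/* the lock is dropped here... */

	printf("key %d\n", key);
	return 0;		/* ...and here, with no unlock label */
}

int main(void)
{
	lookup(1);
	lookup(-1);
	return 0;
}

This is what makes the unlock:/goto pairs above removable: once the guard
owns the critical section, an early exit such as the new "return;" after
a failed rcu_dereference() drops the RCU read lock automatically.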
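
The preempt_disable() conversion is worth a second look, because the code
it guards implements a seqcount-style protocol: userpg->lock is
incremented before and after the timestamp update, with barrier()s in
between, and a userspace reader retries until it observes the same even
value on both sides of its reads. Below is an illustrative sketch of that
pairing, with invented names (upage, writer_update, reader_read) and a
plain compiler barrier standing in for the kernel's barrier(); it is
modeled on the reader loop described in the perf_event_mmap_page
documentation in the uapi header, simplified for brevity (a rigorous
implementation would need real atomics for the data fields as well).

/* Compiler-only barrier, analogous to the kernel's barrier(). */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

struct upage {
	unsigned int lock;		/* seqcount: odd while an update is in flight */
	unsigned long long time_enabled;
};

/* Kernel side: mirrors ++userpg->lock; barrier(); ...; barrier(); ++userpg->lock; */
static void writer_update(struct upage *pg, unsigned long long t)
{
	++pg->lock;			/* odd: update in progress */
	compiler_barrier();
	pg->time_enabled = t;
	compiler_barrier();
	++pg->lock;			/* even again: contents are stable */
}

/* Userspace side: retry until a stable, even sequence is observed. */
static unsigned long long reader_read(struct upage *pg)
{
	unsigned int seq;
	unsigned long long t;

	do {
		seq = *(volatile unsigned int *)&pg->lock;
		compiler_barrier();
		t = pg->time_enabled;
		compiler_barrier();
	} while ((seq & 1) || *(volatile unsigned int *)&pg->lock != seq);

	return t;
}

Keeping the writer non-preemptible, whether via preempt_disable() or the
new guard(preempt)(), guarantees the stored time stamps are consistent
with one another, exactly as the comment in the hunk above says; the
guard only changes where the re-enable happens (end of scope rather than
an explicit preempt_enable()).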