
[v5,09/12] user_events: Optimize writing events by only copying data once

Message ID 20211116005047.1808-10-beaub@linux.microsoft.com (mailing list archive)
State Superseded
Series user_events: Enable user processes to create and write to trace events

Commit Message

Beau Belgrave Nov. 16, 2021, 12:50 a.m. UTC
Pass the iov_iter through to the probes so that user data is copied
directly into each probe's buffer instead of being copied multiple
times. This also exposes the eBPF user and raw iterator data types to
programs, enabling no-copy scenarios. (A condensed sketch of the new
dispatch loop follows the diffstat below.)

Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
---
 kernel/trace/trace_events_user.c | 102 ++++++++++++++++++++++---------
 1 file changed, 74 insertions(+), 28 deletions(-)
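
For readability, here is the core of the new write path, condensed from
the patch below. The key point is that copy_from_iter_nocache() advances
the iterator it consumes, so each registered probe must receive its own
snapshot of the iov_iter; the user pages are pre-faulted once so the
copies can run with page faults disabled (names match the diff, error
paths elided):

	struct iov_iter copy;

	/* Pre-fault the user pages once, up front. */
	if (unlikely(iov_iter_fault_in_readable(i, i->count)))
		return -EFAULT;

	rcu_read_lock_sched();
	pagefault_disable();

	probe_func_ptr = rcu_dereference_sched(tp->funcs);

	if (probe_func_ptr) {
		do {
			copy = *i;	/* private snapshot per probe */
			probe_func = probe_func_ptr->func;
			tpdata = probe_func_ptr->data;
			probe_func(user, &copy, tpdata);
		} while ((++probe_func_ptr)->func);
	}

	pagefault_enable();
	rcu_read_unlock_sched();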

Comments

kernel test robot Nov. 27, 2021, 3:09 p.m. UTC | #1
Hi Beau,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on 67d4f6e3bf5dddced226fbf19704cdbbb0c98847]

url:    https://github.com/0day-ci/linux/commits/Beau-Belgrave/user_events-Enable-user-processes-to-create-and-write-to-trace-events/20211116-090533
base:   67d4f6e3bf5dddced226fbf19704cdbbb0c98847
config: hexagon-randconfig-r031-20211116 (https://download.01.org/0day-ci/archive/20211127/202111272346.r2ZhQ5KM-lkp@intel.com/config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project fbe72e41b99dc7994daac300d208a955be3e4a0a)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/5d08d02e133130c59d164c17756a0aa0abb5418c
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Beau-Belgrave/user_events-Enable-user-processes-to-create-and-write-to-trace-events/20211116-090533
        git checkout 5d08d02e133130c59d164c17756a0aa0abb5418c
        # save the config file to the linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=hexagon SHELL=/bin/bash drivers/gpio/ kernel/trace/

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> kernel/trace/trace_events_user.c:563:22: warning: comparison of distinct pointer types ('typeof (i->count) *' (aka 'unsigned int *') and 'typeof ((1UL << 16)) *' (aka 'unsigned long *')) [-Wcompare-distinct-pointer-types]
                   size_t copy_size = min(i->count, MAX_BPF_COPY_SIZE);
                                      ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/linux/minmax.h:45:19: note: expanded from macro 'min'
   #define min(x, y)       __careful_cmp(x, y, <)
                           ^~~~~~~~~~~~~~~~~~~~~~
   include/linux/minmax.h:36:24: note: expanded from macro '__careful_cmp'
           __builtin_choose_expr(__safe_cmp(x, y), \
                                 ^~~~~~~~~~~~~~~~
   include/linux/minmax.h:26:4: note: expanded from macro '__safe_cmp'
                   (__typecheck(x, y) && __no_side_effects(x, y))
                    ^~~~~~~~~~~~~~~~~
   include/linux/minmax.h:20:28: note: expanded from macro '__typecheck'
           (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
                      ~~~~~~~~~~~~~~ ^  ~~~~~~~~~~~~~~
   1 warning generated.


vim +563 kernel/trace/trace_events_user.c

   537	
   538	#ifdef CONFIG_PERF_EVENTS
   539	static void user_event_bpf(struct user_event *user, struct iov_iter *i)
   540	{
   541		struct user_bpf_context context;
   542		struct user_bpf_iter bpf_i;
   543		char fast_data[MAX_STACK_BPF_DATA];
   544		void *temp = NULL;
   545	
   546		if ((user->flags & FLAG_BPF_ITER) && iter_is_iovec(i)) {
   547			/* Raw iterator */
   548			context.data_type = USER_BPF_DATA_ITER;
   549			context.data_len = i->count;
   550			context.iter = &bpf_i;
   551	
   552			bpf_i.iov_offset = i->iov_offset;
   553			bpf_i.iov = i->iov;
   554			bpf_i.nr_segs = i->nr_segs;
   555		} else if (i->nr_segs == 1 && iter_is_iovec(i)) {
   556			/* Single buffer from user */
   557			context.data_type = USER_BPF_DATA_USER;
   558			context.data_len = i->count;
   559			context.udata = i->iov->iov_base + i->iov_offset;
   560		} else {
   561			/* Multi buffer from user */
   562			struct iov_iter copy = *i;
 > 563			size_t copy_size = min(i->count, MAX_BPF_COPY_SIZE);
   564	
   565			context.data_type = USER_BPF_DATA_KERNEL;
   566			context.kdata = fast_data;
   567	
   568			if (unlikely(copy_size > sizeof(fast_data))) {
   569				temp = kmalloc(copy_size, GFP_NOWAIT);
   570	
   571				if (temp)
   572					context.kdata = temp;
   573				else
   574					copy_size = sizeof(fast_data);
   575			}
   576	
   577			context.data_len = copy_nofault(context.kdata,
   578							copy_size, &copy);
   579		}
   580	
   581		trace_call_bpf(&user->call, &context);
   582	
   583		kfree(temp);
   584	}
   585	
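The warning comes from min()'s strict type check: i->count is a size_t
(an unsigned int on hexagon), while MAX_BPF_COPY_SIZE expands to
PAGE_SIZE, an unsigned long. The conventional fix for this class of
warning (a sketch, not part of this patch) is min_t(), which compares
both operands as a named type:

	/* Compare both operands as size_t to satisfy min()'s type check. */
	size_t copy_size = min_t(size_t, i->count, MAX_BPF_COPY_SIZE);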

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

Patch

diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index badf505568dd..8b1557fc642e 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -41,6 +41,10 @@ 
 #define MAX_FIELD_ARRAY_SIZE (2 * PAGE_SIZE)
 #define MAX_FIELD_ARG_NAME 256
 
+#define MAX_BPF_COPY_SIZE PAGE_SIZE
+#define MAX_STACK_BPF_DATA 512
+#define copy_nofault copy_from_iter_nocache
+
 static char *register_page_data;
 
 static DEFINE_MUTEX(reg_mutex);
@@ -65,8 +69,7 @@  struct user_event_refs {
 	struct user_event *events[];
 };
 
-typedef void (*user_event_func_t) (struct user_event *user,
-				   void *data, u32 datalen,
+typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
 				   void *tpdata);
 
 static int user_event_parse(char *name, char *args, char *flags,
@@ -502,7 +505,7 @@  static struct user_event *find_user_event(char *name, u32 *outkey)
 /*
  * Writes the user supplied payload out to a trace file.
  */
-static void user_event_ftrace(struct user_event *user, void *data, u32 datalen,
+static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
 			      void *tpdata)
 {
 	struct trace_event_file *file;
@@ -518,41 +521,85 @@  static void user_event_ftrace(struct user_event *user, void *data, u32 datalen,
 
 	/* Allocates and fills trace_entry, + 1 of this is data payload */
 	entry = trace_event_buffer_reserve(&event_buffer, file,
-					   sizeof(*entry) + datalen);
+					   sizeof(*entry) + i->count);
 
 	if (unlikely(!entry))
 		return;
 
-	memcpy(entry + 1, data, datalen);
+	if (unlikely(!copy_nofault(entry + 1, i->count, i))) {
+		__trace_event_discard_commit(event_buffer.buffer,
+					     event_buffer.event);
+		return;
+	}
 
 	trace_event_buffer_commit(&event_buffer);
 }
 
 #ifdef CONFIG_PERF_EVENTS
+static void user_event_bpf(struct user_event *user, struct iov_iter *i)
+{
+	struct user_bpf_context context;
+	struct user_bpf_iter bpf_i;
+	char fast_data[MAX_STACK_BPF_DATA];
+	void *temp = NULL;
+
+	if ((user->flags & FLAG_BPF_ITER) && iter_is_iovec(i)) {
+		/* Raw iterator */
+		context.data_type = USER_BPF_DATA_ITER;
+		context.data_len = i->count;
+		context.iter = &bpf_i;
+
+		bpf_i.iov_offset = i->iov_offset;
+		bpf_i.iov = i->iov;
+		bpf_i.nr_segs = i->nr_segs;
+	} else if (i->nr_segs == 1 && iter_is_iovec(i)) {
+		/* Single buffer from user */
+		context.data_type = USER_BPF_DATA_USER;
+		context.data_len = i->count;
+		context.udata = i->iov->iov_base + i->iov_offset;
+	} else {
+		/* Multi buffer from user */
+		struct iov_iter copy = *i;
+		size_t copy_size = min(i->count, MAX_BPF_COPY_SIZE);
+
+		context.data_type = USER_BPF_DATA_KERNEL;
+		context.kdata = fast_data;
+
+		if (unlikely(copy_size > sizeof(fast_data))) {
+			temp = kmalloc(copy_size, GFP_NOWAIT);
+
+			if (temp)
+				context.kdata = temp;
+			else
+				copy_size = sizeof(fast_data);
+		}
+
+		context.data_len = copy_nofault(context.kdata,
+						copy_size, &copy);
+	}
+
+	trace_call_bpf(&user->call, &context);
+
+	kfree(temp);
+}
+
 /*
  * Writes the user supplied payload out to perf ring buffer or eBPF program.
  */
-static void user_event_perf(struct user_event *user, void *data, u32 datalen,
+static void user_event_perf(struct user_event *user, struct iov_iter *i,
 			    void *tpdata)
 {
 	struct hlist_head *perf_head;
 
-	if (bpf_prog_array_valid(&user->call)) {
-		struct user_bpf_context context = {0};
-
-		context.data_len = datalen;
-		context.data_type = USER_BPF_DATA_KERNEL;
-		context.kdata = data;
-
-		trace_call_bpf(&user->call, &context);
-	}
+	if (bpf_prog_array_valid(&user->call))
+		user_event_bpf(user, i);
 
 	perf_head = this_cpu_ptr(user->call.perf_events);
 
 	if (perf_head && !hlist_empty(perf_head)) {
 		struct trace_entry *perf_entry;
 		struct pt_regs *regs;
-		size_t size = sizeof(*perf_entry) + datalen;
+		size_t size = sizeof(*perf_entry) + i->count;
 		int context;
 
 		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
@@ -563,7 +610,10 @@  static void user_event_perf(struct user_event *user, void *data, u32 datalen,
 
 		perf_fetch_caller_regs(regs);
 
-		memcpy(perf_entry + 1, data, datalen);
+		if (unlikely(!copy_nofault(perf_entry + 1, i->count, i))) {
+			perf_swevent_put_recursion_context(context);
+			return;
+		}
 
 		perf_trace_buf_submit(perf_entry, size, context,
 				      user->call.event.type, 1, regs,
@@ -987,32 +1037,28 @@  static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
 	if (likely(atomic_read(&tp->key.enabled) > 0)) {
 		struct tracepoint_func *probe_func_ptr;
 		user_event_func_t probe_func;
+		struct iov_iter copy;
 		void *tpdata;
-		void *kdata;
-		u32 datalen;
 
-		kdata = kmalloc(i->count, GFP_KERNEL);
-
-		if (unlikely(!kdata))
-			return -ENOMEM;
-
-		datalen = copy_from_iter(kdata, i->count, i);
+		if (unlikely(iov_iter_fault_in_readable(i, i->count)))
+			return -EFAULT;
 
 		rcu_read_lock_sched();
+		pagefault_disable();
 
 		probe_func_ptr = rcu_dereference_sched(tp->funcs);
 
 		if (probe_func_ptr) {
 			do {
+				copy = *i;
 				probe_func = probe_func_ptr->func;
 				tpdata = probe_func_ptr->data;
-				probe_func(user, kdata, datalen, tpdata);
+				probe_func(user, &copy, tpdata);
 			} while ((++probe_func_ptr)->func);
 		}
 
+		pagefault_enable();
 		rcu_read_unlock_sched();
-
-		kfree(kdata);
 	}
 
 	return ret;
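
A note on the nofault copy contract used throughout the patch:
copy_nofault is aliased to copy_from_iter_nocache(), which returns the
number of bytes it was able to copy. Because page faults are disabled
around the probe calls, a fault during the copy returns short instead of
sleeping; the ftrace path above handles the zero-byte case by discarding
the reserved ring-buffer event rather than committing it:

	/* From user_event_ftrace(): a failed nofault copy discards the
	 * reserved ring-buffer event instead of committing it.
	 */
	if (unlikely(!copy_nofault(entry + 1, i->count, i))) {
		__trace_event_discard_commit(event_buffer.buffer,
					     event_buffer.event);
		return;
	}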