From patchwork Tue Jan 11 17:25:56 2022
From: Beau Belgrave <beaub@linux.microsoft.com>
To: rostedt@goodmis.org, mhiramat@kernel.org
Cc: linux-trace-devel@vger.kernel.org, linux-kernel@vger.kernel.org,
	beaub@linux.microsoft.com
Subject: [PATCH v9 06/12] user_events: Validate user payloads for size and
 null termination
Date: Tue, 11 Jan 2022 09:25:56 -0800
Message-Id: <20220111172602.2513-7-beaub@linux.microsoft.com>
In-Reply-To: <20220111172602.2513-1-beaub@linux.microsoft.com>
References: <20220111172602.2513-1-beaub@linux.microsoft.com>
X-Mailing-List: linux-trace-devel@vger.kernel.org

Add validation to ensure data is at or greater than the min size for
the fields of the event. If a dynamic array is used and is of type
char, ensure null termination of the array exists.
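[Editorial note, not part of the patch: the validator added below decodes the
dynamic-array location word with the offset in the low 16 bits and the size in
the high 16 bits; for __rel_loc fields the offset is taken relative to the end
of the location word itself rather than the start of the payload. A minimal
illustrative layout follows; the names example_payload and pack_loc are
hypothetical and only mirror the decode done in user_event_validate().]

#include <stdint.h>

/*
 * Sketch only: mirrors the decode in user_event_validate() below
 * (offset = loc & 0xffff; size = loc >> 16). Names are hypothetical.
 */
struct example_payload {
	uint32_t str_loc;	/* (size << 16) | offset from payload start */
	char str[16];		/* char arrays must also be null terminated */
};

static inline uint32_t pack_loc(uint16_t offset, uint16_t size)
{
	return ((uint32_t)size << 16) | offset;
}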
Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
---
 kernel/trace/trace_events_user.c | 147 ++++++++++++++++++++++++++++---
 1 file changed, 133 insertions(+), 14 deletions(-)

diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index e4b166cba1bb..7e1c1282c423 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -64,9 +64,11 @@ struct user_event {
 	struct dyn_event devent;
 	struct hlist_node node;
 	struct list_head fields;
+	struct list_head validators;
 	atomic_t refcnt;
 	int index;
 	int flags;
+	int min_size;
 };
 
 /*
@@ -81,8 +83,17 @@ struct user_event_refs {
 	struct user_event *events[];
 };
 
+#define VALIDATOR_ENSURE_NULL (1 << 0)
+#define VALIDATOR_REL (1 << 1)
+
+struct user_event_validator {
+	struct list_head link;
+	int offset;
+	int flags;
+};
+
 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
-				   void *tpdata);
+				   void *tpdata, bool *faulted);
 
 static int user_event_parse(char *name, char *args, char *flags,
 			    struct user_event **newuser);
@@ -215,6 +226,17 @@ static int user_field_size(const char *type)
 	return -EINVAL;
 }
 
+static void user_event_destroy_validators(struct user_event *user)
+{
+	struct user_event_validator *validator, *next;
+	struct list_head *head = &user->validators;
+
+	list_for_each_entry_safe(validator, next, head, link) {
+		list_del(&validator->link);
+		kfree(validator);
+	}
+}
+
 static void user_event_destroy_fields(struct user_event *user)
 {
 	struct ftrace_event_field *field, *next;
@@ -230,13 +252,43 @@ static int user_event_add_field(struct user_event *user, const char *type,
 		const char *name, int offset, int size,
 		int is_signed, int filter_type)
 {
+	struct user_event_validator *validator;
 	struct ftrace_event_field *field;
+	int validator_flags = 0;
 
 	field = kmalloc(sizeof(*field), GFP_KERNEL);
 
 	if (!field)
 		return -ENOMEM;
 
+	if (str_has_prefix(type, "__data_loc "))
+		goto add_validator;
+
+	if (str_has_prefix(type, "__rel_loc ")) {
+		validator_flags |= VALIDATOR_REL;
+		goto add_validator;
+	}
+
+	goto add_field;
+
+add_validator:
+	if (strstr(type, "char") != 0)
+		validator_flags |= VALIDATOR_ENSURE_NULL;
+
+	validator = kmalloc(sizeof(*validator), GFP_KERNEL);
+
+	if (!validator) {
+		kfree(field);
+		return -ENOMEM;
+	}
+
+	validator->flags = validator_flags;
+	validator->offset = offset;
+
+	/* Want sequential access when validating */
+	list_add_tail(&validator->link, &user->validators);
+
+add_field:
 	field->type = type;
 	field->name = name;
 	field->offset = offset;
@@ -246,6 +298,12 @@ static int user_event_add_field(struct user_event *user, const char *type,
 
 	list_add(&field->link, &user->fields);
 
+	/*
+	 * Min size from user writes that are required, this does not include
+	 * the size of trace_entry (common fields).
+	 */
+	user->min_size = (offset + size) - sizeof(struct trace_entry);
+
 	return 0;
 }
 
@@ -517,6 +575,7 @@ static int destroy_user_event(struct user_event *user)
 	clear_bit(user->index, page_bitmap);
 	hash_del(&user->node);
 
+	user_event_destroy_validators(user);
 	kfree(user->call.print_fmt);
 	kfree(EVENT_NAME(user));
 	kfree(user);
@@ -538,15 +597,49 @@ static struct user_event *find_user_event(char *name, u32 *outkey)
 	return NULL;
 }
 
+static int user_event_validate(struct user_event *user, void *data, int len)
+{
+	struct list_head *head = &user->validators;
+	struct user_event_validator *validator;
+	void *pos, *end = data + len;
+	u32 loc, offset, size;
+
+	list_for_each_entry(validator, head, link) {
+		pos = data + validator->offset;
+
+		/* Already done min_size check, no bounds check here */
+		loc = *(u32 *)pos;
+		offset = loc & 0xffff;
+		size = loc >> 16;
+
+		if (likely(validator->flags & VALIDATOR_REL))
+			pos += offset + sizeof(loc);
+		else
+			pos = data + offset;
+
+		pos += size;
+
+		if (unlikely(pos > end))
+			return -EFAULT;
+
+		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
+			if (unlikely(*(char *)(pos - 1) != '\0'))
+				return -EFAULT;
+	}
+
+	return 0;
+}
+
 /*
  * Writes the user supplied payload out to a trace file.
  */
 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
-			      void *tpdata)
+			      void *tpdata, bool *faulted)
 {
 	struct trace_event_file *file;
 	struct trace_entry *entry;
 	struct trace_event_buffer event_buffer;
+	size_t size = sizeof(*entry) + i->count;
 
 	file = (struct trace_event_file *)tpdata;
 
@@ -556,17 +649,25 @@ static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
 		return;
 
 	/* Allocates and fills trace_entry, + 1 of this is data payload */
-	entry = trace_event_buffer_reserve(&event_buffer, file,
-					   sizeof(*entry) + i->count);
+	entry = trace_event_buffer_reserve(&event_buffer, file, size);
 
 	if (unlikely(!entry))
 		return;
 
 	if (unlikely(!copy_nofault(entry + 1, i->count, i)))
-		__trace_event_discard_commit(event_buffer.buffer,
-					     event_buffer.event);
-	else
-		trace_event_buffer_commit(&event_buffer);
+		goto discard;
+
+	if (!list_empty(&user->validators) &&
+	    unlikely(user_event_validate(user, entry, size)))
+		goto discard;
+
+	trace_event_buffer_commit(&event_buffer);
+
+	return;
+discard:
+	*faulted = true;
+	__trace_event_discard_commit(event_buffer.buffer,
+				     event_buffer.event);
 }
 
 #ifdef CONFIG_PERF_EVENTS
@@ -621,7 +722,7 @@ static void user_event_bpf(struct user_event *user, struct iov_iter *i)
  * Writes the user supplied payload out to perf ring buffer or eBPF program.
  */
 static void user_event_perf(struct user_event *user, struct iov_iter *i,
-			    void *tpdata)
+			    void *tpdata, bool *faulted)
 {
 	struct hlist_head *perf_head;
 
@@ -644,14 +745,21 @@ static void user_event_perf(struct user_event *user, struct iov_iter *i,
 
 		perf_fetch_caller_regs(regs);
 
-		if (unlikely(!copy_nofault(perf_entry + 1, i->count, i))) {
-			perf_swevent_put_recursion_context(context);
-			return;
-		}
+		if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
+			goto discard;
+
+		if (!list_empty(&user->validators) &&
+		    unlikely(user_event_validate(user, perf_entry, size)))
+			goto discard;
 
 		perf_trace_buf_submit(perf_entry, size, context,
 				      user->call.event.type, 1, regs,
 				      perf_head, NULL);
+
+		return;
+discard:
+		*faulted = true;
+		perf_swevent_put_recursion_context(context);
 	}
 }
 #endif
@@ -966,6 +1074,7 @@ static int user_event_parse(char *name, char *args, char *flags,
 
 	INIT_LIST_HEAD(&user->class.fields);
 	INIT_LIST_HEAD(&user->fields);
+	INIT_LIST_HEAD(&user->validators);
 
 	user->tracepoint.name = name;
 
@@ -1014,6 +1123,7 @@ static int user_event_parse(char *name, char *args, char *flags,
 	return 0;
 put_user:
 	user_event_destroy_fields(user);
+	user_event_destroy_validators(user);
 	kfree(user);
 	return ret;
 }
@@ -1071,6 +1181,9 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
 	if (unlikely(user == NULL))
 		return -ENOENT;
 
+	if (unlikely(i->count < user->min_size))
+		return -EINVAL;
+
 	tp = &user->tracepoint;
 
 	/*
@@ -1082,10 +1195,13 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
 		user_event_func_t probe_func;
 		struct iov_iter copy;
 		void *tpdata;
+		bool faulted;
 
 		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
 			return -EFAULT;
 
+		faulted = false;
+
 		rcu_read_lock_sched();
 
 		probe_func_ptr = rcu_dereference_sched(tp->funcs);
@@ -1095,11 +1211,14 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
 				copy = *i;
 				probe_func = probe_func_ptr->func;
 				tpdata = probe_func_ptr->data;
-				probe_func(user, &copy, tpdata);
+				probe_func(user, &copy, tpdata, &faulted);
 			} while ((++probe_func_ptr)->func);
 		}
 
 		rcu_read_unlock_sched();
+
+		if (unlikely(faulted))
+			return -EFAULT;
 	}
 
 	return ret;
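[Editorial note, not part of the patch: from user space, the visible effect of
this change is on the write path. A payload shorter than the event's min_size
now fails with -EINVAL, and a dynamic array that runs past the payload or lacks
a null terminator fails with -EFAULT. Below is a hypothetical user-space sketch
of checking for those results; the data-file descriptor and the leading 32-bit
write index are assumptions carried over from earlier patches in this series,
not defined by this patch, and emit() is a made-up helper name.]

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

/*
 * Hypothetical helper: emit one user_event payload and report the
 * validation errors introduced by this patch.
 */
static int emit(int data_fd, uint32_t write_index,
		void *payload, size_t payload_len)
{
	struct iovec io[2] = {
		{ .iov_base = &write_index, .iov_len = sizeof(write_index) },
		{ .iov_base = payload,      .iov_len = payload_len },
	};

	if (writev(data_fd, io, 2) < 0) {
		if (errno == EINVAL)
			fprintf(stderr, "payload below event min_size\n");
		else if (errno == EFAULT)
			fprintf(stderr, "dynamic array failed validation\n");
		return -1;
	}

	return 0;
}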