@@ -5988,14 +5988,60 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
return __tracing_resize_ring_buffer(tr, size, cpu_id);
}
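
+/*
+ * One entry per loaded module: the base address of its text section
+ * and its name, saved in the persistent scratch area so that a later
+ * boot can resolve addresses recorded by the previous boot.
+ */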
+struct trace_mod_entry {
+ unsigned long mod_addr;
+ char mod_name[MODULE_NAME_LEN];
+};
+
struct trace_scratch {
unsigned long kaslr_addr;
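+ /* Module table filled by save_mod() and kept across reboots */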
+ unsigned long nr_entries;
+ struct trace_mod_entry entries[];
};
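
+/*
+ * save_mod - module_for_each_mod() callback that appends one
+ * trace_mod_entry per loaded module to the persistent scratch area.
+ * A non-zero return value stops the module iteration.
+ */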
+static int save_mod(struct module *mod, void *data)
+{
+ struct trace_array *tr = data;
+ struct trace_scratch *tscratch;
+ struct trace_mod_entry *entry;
+ unsigned int size;
+
+ tscratch = tr->scratch;
+ if (!tscratch)
+ return -1;
+ size = tr->scratch_size;
+
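+ /* Stop if one more entry would not fit in the scratch area */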
+ if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size)
+ return -1;
+
+ entry = &tscratch->entries[tscratch->nr_entries];
+
+ tscratch->nr_entries++;
+
+ entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base;
+ strscpy(entry->mod_name, mod->name);
+
+ return 0;
+}
+
static void update_last_data(struct trace_array *tr)
{
struct trace_scratch *tscratch;

+ if (!(tr->flags & TRACE_ARRAY_FL_BOOT))
+ return;
+
+ /* Reset the module list and reload them */
+ if (tr->scratch) {
+ struct trace_scratch *tscratch = tr->scratch;
+
+ memset(tscratch->entries, 0,
+ flex_array_size(tscratch, entries, tscratch->nr_entries));
+ tscratch->nr_entries = 0;
+
+ module_for_each_mod(save_mod, tr);
+ }
+
if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
return;
@@ -9220,6 +9266,46 @@ static struct dentry *trace_instance_dir;
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
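
+/*
+ * Validate the scratch area recovered from the previous boot: apply
+ * the KASLR offset and sanity-check the saved module table, wiping
+ * the whole area if it looks corrupted.
+ */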
+static void setup_trace_scratch(struct trace_array *tr,
+ struct trace_scratch *tscratch, unsigned int size)
+{
+ struct trace_mod_entry *entry;
+
+ if (!tscratch)
+ return;
+
+ tr->scratch = tscratch;
+ tr->scratch_size = size;
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ if (tscratch->kaslr_addr)
+ tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
+#endif
+
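+ /* The saved entry count must fit within the scratch area */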
+ if (struct_size(tscratch, entries, tscratch->nr_entries) > size)
+ goto reset;
+
+ /* Check if each module name is a valid string */
+ for (int i = 0; i < tscratch->nr_entries; i++) {
+ int n;
+
+ entry = &tscratch->entries[i];
+
+ for (n = 0; n < MODULE_NAME_LEN; n++) {
+ if (entry->mod_name[n] == '\0')
+ break;
+ if (!isprint(entry->mod_name[n]))
+ goto reset;
+ }
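+ /* No terminating NUL found within MODULE_NAME_LEN */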
+ if (n == MODULE_NAME_LEN)
+ goto reset;
+ }
+ return;
+ reset:
+ /* The saved module table is invalid: wipe the whole scratch area */
+ memset(tscratch, 0, size);
+}
+
static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
@@ -9232,21 +9318,15 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
buf->tr = tr;
if (tr->range_addr_start && tr->range_addr_size) {
+ /* Reserve scratch space for up to 128 module entries */
buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
tr->range_addr_start,
tr->range_addr_size,
- sizeof(*tscratch));
+ struct_size(tscratch, entries, 128));
tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
- if (tscratch) {
- tr->scratch = tscratch;
- tr->scratch_size = scratch_size;
+ setup_trace_scratch(tr, tscratch, scratch_size);
-#ifdef CONFIG_RANDOMIZE_BASE
- if (tscratch->kaslr_addr)
- tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
-#endif
- }
/*
* This is basically the same as a mapped buffer,
* with the same restrictions.