@@ -4841,8 +4841,15 @@ static void add_xhlock(struct held_lock *hlock)
xhlock->trace.entries = xhlock->trace_entries;
if (crossrelease_fullstack) {
+ int i;
+
xhlock->trace.skip = 3;
save_stack_trace(&xhlock->trace);
+
+ for (i = 0; i < xhlock->trace.nr_entries; i++) {
+ if (xhlock->trace.entries[i] == 0x6b6b6b6b6b6b6b6bUL)
+ print_lockdep_off("BUG: list poison in add_xhlock!");
+ }
} else {
xhlock->trace.nr_entries = 1;
xhlock->trace.entries[0] = hlock->acquire_ip;
@@ -4950,12 +4957,18 @@ static int copy_trace(struct stack_trace *trace)
unsigned long *buf = stack_trace + nr_stack_trace_entries;
unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
unsigned int nr = min(max_nr, trace->nr_entries);
+ int i;
trace->nr_entries = nr;
memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
trace->entries = buf;
nr_stack_trace_entries += nr;
+ for (i = 0; i < trace->nr_entries; i++) {
+ if (trace->entries[i] == 0x6b6b6b6b6b6b6b6bUL)
+ print_lockdep_off("BUG: list poison in copy_trace!");
+ }
+
if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
if (!debug_locks_off_graph_unlock())
return 0;
@@ -20,6 +20,12 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
for (i = 0; i < trace->nr_entries; i++)
printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
+
+ for (i = 0; i < trace->nr_entries; i++) {
+ if (trace->entries[i] == 0x6b6b6b6b6b6b6b6bUL)
+ printk("BUG: list poison in print_trace!\n");
+ }
+
}
EXPORT_SYMBOL_GPL(print_stack_trace);
@@ -1095,6 +1095,7 @@ config LOCKDEP_CROSSRELEASE
bool "Enable cross-release checking"
depends on PROVE_LOCKING
select LOCKDEP_COMPLETIONS
+ default y
help
This makes lockdep work for crosslock which is a lock allowed to
be released in a different context from the acquisition context.
@@ -1208,7 +1209,7 @@ config LOCKDEP_COMPLETIONS
config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
bool "Enable the boot parameter, crossrelease_fullstack"
depends on LOCKDEP_CROSSRELEASE
- default n
+ default y
help
The lockdep "cross-release" feature needs to record stack traces
(of calling functions) for all acquisitions, for eventual later
Only way to convince our CI to enable stuff that's new and defaulting
to off. Obviously not for merging.

v2: Also enable fullstack backtraces.

v3: Try to chase this elusive stack trace corruption CI is seeing.

v4: Make it compile. Silly me.

v5: Even sillier me.

v6: Still confused.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 kernel/locking/lockdep.c | 13 +++++++++++++
 kernel/stacktrace.c      |  6 ++++++
 lib/Kconfig.debug        |  3 ++-
 3 files changed, 21 insertions(+), 1 deletion(-)