diff mbox series

rtnetlink: add rtnl_lock debug log

Message ID 20210429070237.3012-1-rocco.yue@mediatek.com (mailing list archive)
State New, archived
Headers show
Series rtnetlink: add rtnl_lock debug log | expand

Commit Message

Rocco Yue April 29, 2021, 7:02 a.m. UTC
We often encounter system hangs caused by certain processes
holding rtnl_lock for a long time. Although Linux already has
a lock detection mechanism, it is somewhat cumbersome and
affects system performance. We hope to add a lightweight
debugging mechanism for detecting long rtnl_lock hold times.

Up to now, we have discovered and solved some potential bugs
through such debug information of this lightweight rtnl_lock,
which is helpful for us.

When you say Y for RTNL_LOCK_DEBUG, the kernel will detect
whether any function holds rtnl_lock too long, and some key
information will be printed to help identify the issue point.

i.e: from the following logs, we can clearly see that the pid=5546
RfxSender_4 process holds rtnl_lock for a long time, causing the
system to hang. And we can also speculate that a delay operation
may be performed in devinet_ioctl(), resulting in rtnl_lock not
being released in time.

<6>[  141.151364] ----------- rtnl_print_btrace start -----------
<6>[  141.152079] RfxSender_4[5546][R] hold rtnl_lock more than 2 sec,
start time: 139129481562
<4>[  141.153114]  rtnl_lock+0x88/0xfc
<4>[  141.153523]  devinet_ioctl+0x190/0x1268
<4>[  141.154007]  inet_ioctl+0x108/0x1f4
<4>[  141.154449]  sock_do_ioctl+0x88/0x200
<4>[  141.154911]  sock_ioctl+0x4b0/0x884
<4>[  141.155367]  do_vfs_ioctl+0x6b0/0xcc4
<4>[  141.155830]  __arm64_sys_ioctl+0xc0/0xec
<4>[  141.156326]  el0_svc_common+0x130/0x2c0
<4>[  141.156810]  el0_svc_handler+0xd0/0xe0
<4>[  141.157283]  el0_svc+0x8/0xc
<4>[  141.157646] Call trace:
<4>[  141.157956]  dump_backtrace+0x0/0x240
<4>[  141.158418]  show_stack+0x18/0x24
<4>[  141.158836]  rtnl_print_btrace+0x138/0x1cc
<4>[  141.159362]  call_timer_fn+0x120/0x47c
<4>[  141.159834]  expire_timers+0x28c/0x420
<4>[  141.160306]  __run_timers+0x3d0/0x494
<4>[  141.160768]  run_timer_softirq+0x24/0x48
<4>[  141.161262]  __do_softirq+0x26c/0x968
<4>[  141.161725]  irq_exit+0x1f8/0x2b4
<4>[  141.162145]  __handle_domain_irq+0xdc/0x15c
<4>[  141.162672]  gic_handle_irq+0xe4/0x188
<4>[  141.163144]  el1_irq+0x104/0x200
<4>[  141.163559]  __const_udelay+0x118/0x1b0
<4>[  141.164044]  devinet_ioctl+0x1a0/0x1268
<4>[  141.164527]  inet_ioctl+0x108/0x1f4
<4>[  141.164968]  sock_do_ioctl+0x88/0x200
<4>[  141.165428]  sock_ioctl+0x4b0/0x884
<4>[  141.165868]  do_vfs_ioctl+0x6b0/0xcc4
<4>[  141.166330]  __arm64_sys_ioctl+0xc0/0xec
<4>[  141.166825]  el0_svc_common+0x130/0x2c0
<4>[  141.167308]  el0_svc_handler+0xd0/0xe0
<4>[  141.167786]  el0_svc+0x8/0xc
<6>[  141.168153] ------------ rtnl_print_btrace end -----------

<6>[  147.321389] rtnl_lock is held by [5546] from
[139129481562] to [147321378812]

Signed-off-by: Rocco Yue <rocco.yue@mediatek.com>
---
 lib/Kconfig.debug    |  9 +++++
 net/core/rtnetlink.c | 86 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 95 insertions(+)

Comments

Andy Shevchenko April 29, 2021, 9:32 a.m. UTC | #1
On Thu, Apr 29, 2021 at 10:21 AM Rocco Yue <rocco.yue@mediatek.com> wrote:
>
> We often encounter system hangs caused by certain processes
> holding rtnl_lock for a long time. Even if there is a lock
> detection mechanism in Linux, it is a bit troublesome and
> affects the system performance. We hope to add a lightweight
> debugging mechanism for detecting rtnl_lock.
>
> Up to now, we have discovered and solved some potential bugs
> through such debug information of this lightweight rtnl_lock,
> which is helpful for us.
>
> When you say Y for RTNL_LOCK_DEBUG, then the kernel will detect
> if any function hold rtnl_lock too long and some key information
> will be printed to help identify the issue point.
>
> i.e: from the following logs, we can clear know that the pid=5546

clearly

> RfxSender_4 process hold rtnl_lock for a long time, causing the

holds

> system hang. And we can also speculate that the delay operation

to hang

> may be performed in devinet_ioctl(), resulting in rtnl_lock was
> not released in time.
>
> <6>[  141.151364] ----------- rtnl_print_btrace start -----------

Can you, please, shrink this to the point?

> <6>[  141.152079] RfxSender_4[5546][R] hold rtnl_lock more than 2 sec,
> start time: 139129481562
> <4>[  141.153114]  rtnl_lock+0x88/0xfc
> <4>[  141.153523]  devinet_ioctl+0x190/0x1268
> <4>[  141.154007]  inet_ioctl+0x108/0x1f4
> <4>[  141.154449]  sock_do_ioctl+0x88/0x200
> <4>[  141.154911]  sock_ioctl+0x4b0/0x884
> <4>[  141.155367]  do_vfs_ioctl+0x6b0/0xcc4
> <4>[  141.155830]  __arm64_sys_ioctl+0xc0/0xec
> <4>[  141.156326]  el0_svc_common+0x130/0x2c0
> <4>[  141.156810]  el0_svc_handler+0xd0/0xe0
> <4>[  141.157283]  el0_svc+0x8/0xc
> <4>[  141.157646] Call trace:
> <4>[  141.157956]  dump_backtrace+0x0/0x240
> <4>[  141.158418]  show_stack+0x18/0x24
> <4>[  141.158836]  rtnl_print_btrace+0x138/0x1cc
> <4>[  141.159362]  call_timer_fn+0x120/0x47c
> <4>[  141.159834]  expire_timers+0x28c/0x420
> <4>[  141.160306]  __run_timers+0x3d0/0x494
> <4>[  141.160768]  run_timer_softirq+0x24/0x48
> <4>[  141.161262]  __do_softirq+0x26c/0x968
> <4>[  141.161725]  irq_exit+0x1f8/0x2b4
> <4>[  141.162145]  __handle_domain_irq+0xdc/0x15c
> <4>[  141.162672]  gic_handle_irq+0xe4/0x188
> <4>[  141.163144]  el1_irq+0x104/0x200
> <4>[  141.163559]  __const_udelay+0x118/0x1b0
> <4>[  141.164044]  devinet_ioctl+0x1a0/0x1268
> <4>[  141.164527]  inet_ioctl+0x108/0x1f4
> <4>[  141.164968]  sock_do_ioctl+0x88/0x200
> <4>[  141.165428]  sock_ioctl+0x4b0/0x884
> <4>[  141.165868]  do_vfs_ioctl+0x6b0/0xcc4
> <4>[  141.166330]  __arm64_sys_ioctl+0xc0/0xec
> <4>[  141.166825]  el0_svc_common+0x130/0x2c0
> <4>[  141.167308]  el0_svc_handler+0xd0/0xe0
> <4>[  141.167786]  el0_svc+0x8/0xc
> <6>[  141.168153] ------------ rtnl_print_btrace end -----------
>
> <6>[  147.321389] rtnl_lock is held by [5546] from
> [139129481562] to [147321378812]


...

> +static struct rtnl_debug_btrace_t rtnl_instance = {
> +       .task           = NULL,
> +       .pid            = 0,
> +       .start_time     = 0,
> +       .end_time       = 0,
> +       .nr_entries     = 0,

static assumes all 0:s, what's the point?

> +};

...

> +static void rtnl_print_btrace(struct timer_list *unused)
> +{
> +       pr_info("----------- %s start -----------\n", __func__);
> +       pr_info("%s[%d][%c] hold rtnl_lock more than 2 sec, start time: %llu\n",
> +               rtnl_instance.task->comm,
> +               rtnl_instance.pid,
> +               task_state_to_char(rtnl_instance.task),
> +               rtnl_instance.start_time);
> +       stack_trace_print(rtnl_instance.addrs, rtnl_instance.nr_entries, 0);

> +       show_stack(rtnl_instance.task, NULL, KERN_DEBUG);

Unaligned debug level.

> +       pr_info("------------ %s end -----------\n", __func__);

Looking into tons of these, perhaps you need to define pr_fmt(). I
haven't checked if it's already defined, though.

> +}

...

> +       if (rtnl_instance.end_time - rtnl_instance.start_time > 2000000000ULL) {

Perhaps you should use one of the defined constants from time64.h ?

> +               pr_info("rtnl_lock is held by [%d] from [%llu] to [%llu]\n",
> +                       rtnl_instance.pid,
> +                       rtnl_instance.start_time,
> +                       rtnl_instance.end_time);
> +       }
Tetsuo Handa April 30, 2021, 11:48 a.m. UTC | #2
On 2021/04/29 16:02, Rocco Yue wrote:
> +static void rtnl_print_btrace(struct timer_list *unused)
> +{
> +	pr_info("----------- %s start -----------\n", __func__);
> +	pr_info("%s[%d][%c] hold rtnl_lock more than 2 sec, start time: %llu\n",
> +		rtnl_instance.task->comm,
> +		rtnl_instance.pid,
> +		task_state_to_char(rtnl_instance.task),
> +		rtnl_instance.start_time);
> +	stack_trace_print(rtnl_instance.addrs, rtnl_instance.nr_entries, 0);

Do we want to print same traces every 2 seconds? 

Since it is possible to stall for e.g. 30 seconds, printing either only upon
first call to rtnl_print_btrace() for each stalling duration or only upon
end of stalling duration (i.e. from rtnl_relase_btrace()) is better?

> +	show_stack(rtnl_instance.task, NULL, KERN_DEBUG);

Why KERN_DEBUG ?

If you retrieve the output via dmesg, KERN_DEBUG would be fine.
But for syzkaller (which counts on printk() messages being printed to
consoles), KERN_INFO (or default) is expected.

> +	pr_info("------------ %s end -----------\n", __func__);
> +}
> +
> +static void rtnl_relase_btrace(void)
> +{
> +	rtnl_instance.end_time = sched_clock();
> +

You should del_timer_sync() here rather than

> +	if (rtnl_instance.end_time - rtnl_instance.start_time > 2000000000ULL) {
> +		pr_info("rtnl_lock is held by [%d] from [%llu] to [%llu]\n",
> +			rtnl_instance.pid,
> +			rtnl_instance.start_time,
> +			rtnl_instance.end_time);
> +	}
> +
> +	del_timer(&rtnl_chk_timer);

here in order to make sure that end message is printed only after
rtnl_print_btrace() messages are printed.

> +}
> +#endif
> +
Rocco Yue May 8, 2021, 8:53 a.m. UTC | #3
> > We often encounter system hangs caused by certain processes
> > holding rtnl_lock for a long time. Even if there is a lock
> > detection mechanism in Linux, it is a bit troublesome and
> > affects the system performance. We hope to add a lightweight
> > debugging mechanism for detecting rtnl_lock.
> >
> > Up to now, we have discovered and solved some potential bugs
> > through such debug information of this lightweight rtnl_lock,
> > which is helpful for us.
> >
> > When you say Y for RTNL_LOCK_DEBUG, then the kernel will detect
> > if any function hold rtnl_lock too long and some key information
> > will be printed to help identify the issue point.
> >
> > i.e: from the following logs, we can clear know that the pid=5546
> 
> clearly
> 
> > RfxSender_4 process hold rtnl_lock for a long time, causing the
> 
> holds
> 
> > system hang. And we can also speculate that the delay operation
> 
> to hang
> 

Thanks for your review, Andy.

I will fix them.

> > may be performed in devinet_ioctl(), resulting in rtnl_lock was
> > not released in time.
> >
> > <6>[  141.151364] ----------- rtnl_print_btrace start -----------
> 
> Can you, please, shrink this to the point?
> 

Will shrink these points.

> > <6>[  141.152079] RfxSender_4[5546][R] hold rtnl_lock more than 2 sec,
> > start time: 139129481562
> > <4>[  141.153114]  rtnl_lock+0x88/0xfc
> > <4>[  141.153523]  devinet_ioctl+0x190/0x1268
> > <4>[  141.154007]  inet_ioctl+0x108/0x1f4
> > <4>[  141.154449]  sock_do_ioctl+0x88/0x200
> > <4>[  141.154911]  sock_ioctl+0x4b0/0x884
> > <4>[  141.155367]  do_vfs_ioctl+0x6b0/0xcc4
> > <4>[  141.155830]  __arm64_sys_ioctl+0xc0/0xec
> > <4>[  141.156326]  el0_svc_common+0x130/0x2c0
> > <4>[  141.156810]  el0_svc_handler+0xd0/0xe0
> > <4>[  141.157283]  el0_svc+0x8/0xc
> > <4>[  141.157646] Call trace:
> > <4>[  141.157956]  dump_backtrace+0x0/0x240
> > <4>[  141.158418]  show_stack+0x18/0x24
> > <4>[  141.158836]  rtnl_print_btrace+0x138/0x1cc
> > <4>[  141.159362]  call_timer_fn+0x120/0x47c
> > <4>[  141.159834]  expire_timers+0x28c/0x420
> > <4>[  141.160306]  __run_timers+0x3d0/0x494
> > <4>[  141.160768]  run_timer_softirq+0x24/0x48
> > <4>[  141.161262]  __do_softirq+0x26c/0x968
> > <4>[  141.161725]  irq_exit+0x1f8/0x2b4
> > <4>[  141.162145]  __handle_domain_irq+0xdc/0x15c
> > <4>[  141.162672]  gic_handle_irq+0xe4/0x188
> > <4>[  141.163144]  el1_irq+0x104/0x200
> > <4>[  141.163559]  __const_udelay+0x118/0x1b0
> > <4>[  141.164044]  devinet_ioctl+0x1a0/0x1268
> > <4>[  141.164527]  inet_ioctl+0x108/0x1f4
> > <4>[  141.164968]  sock_do_ioctl+0x88/0x200
> > <4>[  141.165428]  sock_ioctl+0x4b0/0x884
> > <4>[  141.165868]  do_vfs_ioctl+0x6b0/0xcc4
> > <4>[  141.166330]  __arm64_sys_ioctl+0xc0/0xec
> > <4>[  141.166825]  el0_svc_common+0x130/0x2c0
> > <4>[  141.167308]  el0_svc_handler+0xd0/0xe0
> > <4>[  141.167786]  el0_svc+0x8/0xc
> > <6>[  141.168153] ------------ rtnl_print_btrace end -----------
> >
> > <6>[  147.321389] rtnl_lock is held by [5546] from
> > [139129481562] to [147321378812]
> 
> 
> ...
> 
> > +static struct rtnl_debug_btrace_t rtnl_instance = {
> > +       .task           = NULL,
> > +       .pid            = 0,
> > +       .start_time     = 0,
> > +       .end_time       = 0,
> > +       .nr_entries     = 0,
> 
> static assumes all 0:s, what's the point?
> 

will fix it in the patch v2.

> > +};
> 
> ...
> 
> > +static void rtnl_print_btrace(struct timer_list *unused)
> > +{
> > +       pr_info("----------- %s start -----------\n", __func__);
> > +       pr_info("%s[%d][%c] hold rtnl_lock more than 2 sec, start time: %llu\n",
> > +               rtnl_instance.task->comm,
> > +               rtnl_instance.pid,
> > +               task_state_to_char(rtnl_instance.task),
> > +               rtnl_instance.start_time);
> > +       stack_trace_print(rtnl_instance.addrs, rtnl_instance.nr_entries, 0);
> 
> > +       show_stack(rtnl_instance.task, NULL, KERN_DEBUG);
> 
> Unaligned debug level.
> 

KERN_INFO is expected, I will fix it in patch v2.

> > +       pr_info("------------ %s end -----------\n", __func__);
> 
> Looking into tons of these, perhaps you need to define pr_fmt(). I
> haven't checked if it's already defined, though.
> 

In the files related to rtnetlink, pr_fmt() is not defined. I am not
sure whether I can define one, since there are other error logs in rtnetlink.c.

In addition, there is "rtnl_lock" field in the log, I usually use it to
retrieve whether rtnl_lock is held for a long time.

If adding pr_fmt() is better, it is ok to me :-)

> > +}
> 
> ...
> 
> > +       if (rtnl_instance.end_time - rtnl_instance.start_time > 2000000000ULL) {
> 
> Perhaps you should use one of the defined constants from time64.h ?
> 

Will have a separate patch for this.

> > +               pr_info("rtnl_lock is held by [%d] from [%llu] to [%llu]\n",
> > +                       rtnl_instance.pid,
> > +                       rtnl_instance.start_time,
> > +                       rtnl_instance.end_time);
> > +       }
> 

Best Regards
Rocco
Rocco Yue May 8, 2021, 8:54 a.m. UTC | #4
> > +static void rtnl_print_btrace(struct timer_list *unused)
> > +{
> > +	pr_info("----------- %s start -----------\n", __func__);
> > +	pr_info("%s[%d][%c] hold rtnl_lock more than 2 sec, start time: %llu\n",
> > +		rtnl_instance.task->comm,
> > +		rtnl_instance.pid,
> > +		task_state_to_char(rtnl_instance.task),
> > +		rtnl_instance.start_time);
> > +	stack_trace_print(rtnl_instance.addrs, rtnl_instance.nr_entries, 0);
> 
> Do we want to print same traces every 2 seconds? 
> 
> Since it is possible to stall for e.g. 30 seconds, printing either only upon
> first call to rtnl_print_btrace() for each stalling duration or only upon
> end of stalling duration (i.e. from rtnl_relase_btrace()) is better?
> 

Thanks for your review, Tetsuo.

rtnl_chk_timer is a one-shot timer; I don't rearm the timer after it
expires, so traces are only logged once after rtnl_lock has been held
for more than 2 seconds, rather than every 2 seconds. In this way, even
if this rtnl_lock debug mechanism is enabled, the overhead remains
affordable under a production workload.

As far as I understand it, there won't be the problem you mentioned.

In my experience, it's more appropriate to print the trace when
rtnl_print_btrace() is called, since we can't expect that rtnl_lock
will be released before a kernel reboot. And printing these logs once
the rtnl_lock has been held for more than 2s is more helpful for us.

> > +	show_stack(rtnl_instance.task, NULL, KERN_DEBUG);
> 
> Why KERN_DEBUG ?
> 
> If you retrieve the output via dmesg, KERN_DEBUG would be fine.
> But for syzkaller (which counts on printk() messages being printed to
> consoles), KERN_INFO (or default) is expected.
> 

KERN_INFO is expected, I will fix it in patch v2.

> > +	pr_info("------------ %s end -----------\n", __func__);
> > +}
> > +
> > +static void rtnl_relase_btrace(void)
> > +{
> > +	rtnl_instance.end_time = sched_clock();
> > +
> 
> You should del_timer_sync() here than
> 
> > +	if (rtnl_instance.end_time - rtnl_instance.start_time > 2000000000ULL) {
> > +		pr_info("rtnl_lock is held by [%d] from [%llu] to [%llu]\n",
> > +			rtnl_instance.pid,
> > +			rtnl_instance.start_time,
> > +			rtnl_instance.end_time);
> > +	}
> > +
> > +	del_timer(&rtnl_chk_timer);
> 
> here in order to make sure that end message is printed only after
> rtnl_print_btrace() messages are printed.
> 

Will have a separate patch for this.

> > +}
> > +#endif
> > +
> 

Best Regards
Rocco
diff mbox series

Patch

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2c7f46b366f1..d0d7457fa394 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2019,6 +2019,15 @@  config KCOV_IRQ_AREA_SIZE
 	  soft interrupts. This specifies the size of those areas in the
 	  number of unsigned long words.
 
+config RTNL_LOCK_DEBUG
+	bool "rtnl_lock debugging, deadlock detection"
+	select STACKTRACE
+	help
+	  If you say Y here, then the kernel will detect whether any process
+	  holds rtnl_lock for too long, and some key information will be
+	  printed to help identify the issue point.
+	  If unsure, say N.
+
 menuconfig RUNTIME_TESTING_MENU
 	bool "Runtime Testing"
 	def_bool y
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3485b16a7ff3..ddf374aec6af 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -57,6 +57,86 @@ 
 #define RTNL_MAX_TYPE		50
 #define RTNL_SLAVE_MAX_TYPE	40
 
+#ifdef CONFIG_RTNL_LOCK_DEBUG
+#include <linux/sched/debug.h>
+#include <linux/stacktrace.h>
+
+/* Debug log and btrace will be printed when the rtnl_lock
+ * is held for more than RTNL_LOCK_MAX_HOLD_TIME seconds
+ */
+#define RTNL_LOCK_MAX_HOLD_TIME 2
+
+#define RTNL_LOCK_MAX_TRACE     10    /* stack trace length */
+
+struct rtnl_debug_btrace_t {
+	struct task_struct *task;
+	int pid;
+	unsigned long long start_time;
+	unsigned long long end_time;
+	unsigned long   addrs[RTNL_LOCK_MAX_TRACE];
+	unsigned int    nr_entries;
+};
+
+static struct rtnl_debug_btrace_t rtnl_instance = {
+	.task		= NULL,
+	.pid		= 0,
+	.start_time	= 0,
+	.end_time	= 0,
+	.nr_entries	= 0,
+};
+
+static void rtnl_print_btrace(struct timer_list *unused);
+static DEFINE_TIMER(rtnl_chk_timer, rtnl_print_btrace);
+
+/* Save stack trace to the given array of RTNL_LOCK_MAX_TRACE size.
+ */
+static int __save_stack_trace(unsigned long *trace)
+{
+	return stack_trace_save(trace, RTNL_LOCK_MAX_TRACE, 0);
+}
+
+static void rtnl_get_btrace(struct task_struct *who)
+{
+	unsigned long expires;
+
+	rtnl_instance.task = who;
+	rtnl_instance.pid = who->pid;
+	rtnl_instance.start_time = sched_clock();
+	rtnl_instance.end_time = 0;
+	rtnl_instance.nr_entries = __save_stack_trace(rtnl_instance.addrs);
+
+	expires = jiffies + RTNL_LOCK_MAX_HOLD_TIME * HZ;
+	mod_timer(&rtnl_chk_timer, expires);
+}
+
+static void rtnl_print_btrace(struct timer_list *unused)
+{
+	pr_info("----------- %s start -----------\n", __func__);
+	pr_info("%s[%d][%c] hold rtnl_lock more than 2 sec, start time: %llu\n",
+		rtnl_instance.task->comm,
+		rtnl_instance.pid,
+		task_state_to_char(rtnl_instance.task),
+		rtnl_instance.start_time);
+	stack_trace_print(rtnl_instance.addrs, rtnl_instance.nr_entries, 0);
+	show_stack(rtnl_instance.task, NULL, KERN_INFO);	/* same level as pr_info so consoles see it */
+	pr_info("------------ %s end -----------\n", __func__);
+}
+
+static void rtnl_relase_btrace(void)
+{
+	rtnl_instance.end_time = sched_clock();
+
+	/* Quiesce the watchdog first so the end message below cannot
+	 * race with rtnl_print_btrace() running on another CPU. */
+	del_timer_sync(&rtnl_chk_timer);
+
+	if (rtnl_instance.end_time - rtnl_instance.start_time > RTNL_LOCK_MAX_HOLD_TIME * 1000000000ULL) {
+		pr_info("rtnl_lock is held by [%d] from [%llu] to [%llu]\n",
+			rtnl_instance.pid, rtnl_instance.start_time, rtnl_instance.end_time);
+	}
+}
+#endif
+
 struct rtnl_link {
 	rtnl_doit_func		doit;
 	rtnl_dumpit_func	dumpit;
@@ -70,6 +150,9 @@  static DEFINE_MUTEX(rtnl_mutex);
 void rtnl_lock(void)
 {
 	mutex_lock(&rtnl_mutex);
+#ifdef CONFIG_RTNL_LOCK_DEBUG
+	rtnl_get_btrace(current);
+#endif
 }
 EXPORT_SYMBOL(rtnl_lock);
 
@@ -104,6 +187,9 @@  void __rtnl_unlock(void)
 		cond_resched();
 		head = next;
 	}
+#ifdef CONFIG_RTNL_LOCK_DEBUG
+	rtnl_relase_btrace();
+#endif
 }
 
 void rtnl_unlock(void)