--- a/include/linux/ref_tracker.h
+++ b/include/linux/ref_tracker.h
@@ -36,6 +36,9 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
void ref_tracker_dir_exit(struct ref_tracker_dir *dir);
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit);
+
void ref_tracker_dir_print(struct ref_tracker_dir *dir,
unsigned int display_limit);
@@ -56,6 +59,11 @@ static inline void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
}
+static inline void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir,
unsigned int display_limit)
{
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -14,6 +14,38 @@ struct ref_tracker {
depot_stack_handle_t free_stack_handle;
};
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+ struct ref_tracker *tracker;
+ unsigned int i = 0;
+
+ lockdep_assert_held(&dir->lock);
+
+ list_for_each_entry(tracker, &dir->list, head) {
+ if (i < display_limit) {
+ pr_err("leaked reference.\n");
+ if (tracker->alloc_stack_handle)
+ stack_depot_print(tracker->alloc_stack_handle);
+ i++;
+ } else {
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(ref_tracker_dir_print_locked);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dir->lock, flags);
+ ref_tracker_dir_print_locked(dir, display_limit);
+ spin_unlock_irqrestore(&dir->lock, flags);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print);
+
void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
struct ref_tracker *tracker, *n;
@@ -27,13 +59,13 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
kfree(tracker);
dir->quarantine_avail++;
}
- list_for_each_entry_safe(tracker, n, &dir->list, head) {
- pr_err("leaked reference.\n");
- if (tracker->alloc_stack_handle)
- stack_depot_print(tracker->alloc_stack_handle);
+ if (!list_empty(&dir->list)) {
+ ref_tracker_dir_print_locked(dir, 16);
leak = true;
- list_del(&tracker->head);
- kfree(tracker);
+ list_for_each_entry_safe(tracker, n, &dir->list, head) {
+ list_del(&tracker->head);
+ kfree(tracker);
+ }
}
spin_unlock_irqrestore(&dir->lock, flags);
WARN_ON_ONCE(leak);
@@ -42,28 +74,6 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
}
EXPORT_SYMBOL(ref_tracker_dir_exit);
-void ref_tracker_dir_print(struct ref_tracker_dir *dir,
- unsigned int display_limit)
-{
- struct ref_tracker *tracker;
- unsigned long flags;
- unsigned int i = 0;
-
- spin_lock_irqsave(&dir->lock, flags);
- list_for_each_entry(tracker, &dir->list, head) {
- if (i < display_limit) {
- pr_err("leaked reference.\n");
- if (tracker->alloc_stack_handle)
- stack_depot_print(tracker->alloc_stack_handle);
- i++;
- } else {
- break;
- }
- }
- spin_unlock_irqrestore(&dir->lock, flags);
-}
-EXPORT_SYMBOL(ref_tracker_dir_print);
-
int ref_tracker_alloc(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp,
gfp_t gfp)
To have reliable detection of leaks, the caller must be able to check both the tracked counter and the leaks under the same lock. dir.lock is a natural candidate for such a lock, and an unlocked print helper can be called with this lock already taken. As a bonus, we can reuse this helper in ref_tracker_dir_exit().

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
 include/linux/ref_tracker.h |  8 ++++++
 lib/ref_tracker.c           | 66 ++++++++++++++++++++++++++-------------------
 2 files changed, 46 insertions(+), 28 deletions(-)
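As a rough usage sketch (not part of this patch), a caller can now take dir.lock itself, compare its own reference counter and dump any leaks while holding that lock, so the two observations are consistent. The struct my_obj, its refcnt field and the display limit of 16 below are invented purely for illustration:

#include <linux/ref_tracker.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

/* Hypothetical user object; only the fields needed for the sketch. */
struct my_obj {
        struct ref_tracker_dir dir;
        refcount_t refcnt;
};

/*
 * Check the counter and report leaks under the same lock (dir.lock),
 * relying on the new unlocked print helper.
 */
static void my_obj_check_leaks(struct my_obj *obj)
{
        unsigned long flags;

        spin_lock_irqsave(&obj->dir.lock, flags);
        if (refcount_read(&obj->refcnt) != 1)
                ref_tracker_dir_print_locked(&obj->dir, 16);
        spin_unlock_irqrestore(&obj->dir.lock, flags);
}

The same pattern works from any context where dir.lock is already held, which is why ref_tracker_dir_print_locked() only asserts the lock via lockdep instead of taking it.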