@@ -506,12 +506,14 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 	bucket = &stack_table[hash & stack_hash_mask];
 
 	read_lock_irqsave(&pool_rwlock, flags);
+	printk_deferred_enter();
 
 	/* Fast path: look the stack trace up without full locking. */
 	found = find_stack(bucket, entries, nr_entries, hash);
 	if (found) {
 		if (depot_flags & STACK_DEPOT_FLAG_GET)
 			refcount_inc(&found->count);
+		printk_deferred_exit();
 		read_unlock_irqrestore(&pool_rwlock, flags);
 		goto exit;
 	}
@@ -520,6 +522,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 	if (new_pool_required)
 		need_alloc = true;
 
+	printk_deferred_exit();
 	read_unlock_irqrestore(&pool_rwlock, flags);
 
 	/*
@@ -541,6 +544,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 	}
 
 	write_lock_irqsave(&pool_rwlock, flags);
+	printk_deferred_enter();
 
 	found = find_stack(bucket, entries, nr_entries, hash);
 	if (!found) {
@@ -562,6 +566,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 			depot_keep_new_pool(&prealloc);
 	}
 
+	printk_deferred_exit();
 	write_unlock_irqrestore(&pool_rwlock, flags);
 exit:
 	if (prealloc) {
@@ -600,9 +605,11 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 		return 0;
 
 	read_lock_irqsave(&pool_rwlock, flags);
+	printk_deferred_enter();
 
 	stack = depot_fetch_stack(handle);
+	printk_deferred_exit();
 
 	read_unlock_irqrestore(&pool_rwlock, flags);
 
 	*entries = stack->entries;
@@ -619,6 +626,7 @@ void stack_depot_put(depot_stack_handle_t handle)
 		return;
 
 	write_lock_irqsave(&pool_rwlock, flags);
+	printk_deferred_enter();
 
 	stack = depot_fetch_stack(handle);
 	if (WARN_ON(!stack))
@@ -633,6 +641,7 @@ void stack_depot_put(depot_stack_handle_t handle)
 	}
 
 out:
+	printk_deferred_exit();
 	write_unlock_irqrestore(&pool_rwlock, flags);
 }
 EXPORT_SYMBOL_GPL(stack_depot_put);
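
For reference, a minimal sketch (not part of the patch) of the guard pattern applied above: printk_deferred_enter()/printk_deferred_exit() bracket a section that already runs with interrupts disabled under a lock, so any printk() triggered inside it (for example by a WARN_ON()) is queued and flushed later instead of being printed synchronously while the lock is held, which is what can recurse back into lock-taking code. The lock and function names below are hypothetical stand-ins, not taken from stackdepot.

#include <linux/printk.h>
#include <linux/spinlock.h>

/* Hypothetical lock standing in for pool_rwlock. */
static DEFINE_RWLOCK(example_lock);

static void example_guarded_section(void)
{
	unsigned long flags;

	write_lock_irqsave(&example_lock, flags);
	/* IRQs are off here, so entering deferred-printk mode is permitted. */
	printk_deferred_enter();

	/*
	 * Work under the lock that might emit a printk(), e.g. via
	 * WARN_ON(): the message is deferred rather than printed
	 * while example_lock is held.
	 */

	printk_deferred_exit();
	write_unlock_irqrestore(&example_lock, flags);
}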