@@ -59,7 +59,7 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
 	if (list_empty(&dir->list))
 		return;
 
-	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT);
+	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);
 
 	list_for_each_entry(tracker, &dir->list, head)
 		++total;
@@ -154,11 +154,11 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir,
 	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
 	struct ref_tracker *tracker;
 	unsigned int nr_entries;
-	gfp_t gfp_mask = gfp;
+	gfp_t gfp_mask;
 	unsigned long flags;
 
-	if (gfp & __GFP_DIRECT_RECLAIM)
-		gfp_mask |= __GFP_NOFAIL;
+	gfp |= __GFP_NOWARN;
+	gfp_mask = (gfp & __GFP_DIRECT_RECLAIM) ? (gfp | __GFP_NOFAIL) : gfp;
 	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
 	if (unlikely(!tracker)) {
 		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
@@ -191,7 +191,8 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
 	}
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
 	nr_entries = filter_irq_stacks(entries, nr_entries);
-	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
+	stack_handle = stack_depot_save(entries, nr_entries,
+					GFP_NOWAIT | __GFP_NOWARN);
 
 	spin_lock_irqsave(&dir->lock, flags);
 	if (tracker->dead) {
The library can be called in non-sleeping context, so it should not use
__GFP_NOFAIL. Instead, it should calmly handle allocation failures; for
this, __GFP_NOWARN has been added as well.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
 lib/ref_tracker.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
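
For illustration only (not part of the patch), a minimal sketch of the flag
policy the change follows; the helper name is hypothetical. __GFP_NOFAIL is
applied only when the caller's gfp mask allows direct reclaim (i.e. the
caller may sleep), and __GFP_NOWARN is always set so a failed allocation
does not emit the page allocator's warning splat:

#include <linux/gfp.h>

/*
 * Hypothetical helper, for illustration only: mirrors the flag
 * selection done in ref_tracker_alloc() above.
 */
static inline gfp_t ref_tracker_gfp(gfp_t gfp)
{
	gfp |= __GFP_NOWARN;			/* failure is tolerated, stay quiet */
	if (gfp & __GFP_DIRECT_RECLAIM)		/* caller may sleep ... */
		gfp |= __GFP_NOFAIL;		/* ... so endless retry is allowed */
	return gfp;
}

With GFP_ATOMIC/GFP_NOWAIT callers the allocation may then fail;
ref_tracker_alloc() handles that case through the pr_err_once() path shown
in the second hunk instead of relying on __GFP_NOFAIL.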