
[1/2] mm: fix missing handler for __GFP_NOWARN

Message ID 20220510113809.80626-1-zhengqi.arch@bytedance.com (mailing list archive)
State New
Series [1/2] mm: fix missing handler for __GFP_NOWARN

Commit Message

Qi Zheng May 10, 2022, 11:38 a.m. UTC
We expect no warnings to be issued when we specify __GFP_NOWARN, but
currently, paths like alloc_pages() and kmalloc() still print some
warnings. Fix this.
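
As a minimal illustration (hypothetical caller, not taken from this
patch): with slab fault injection enabled, a request such as

        /* the caller asked for silence, yet fail_dump() still prints */
        void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

still produces a "FAULT_INJECTION: forcing a failure" splat when the
allocation is chosen to fail, even though __GFP_NOWARN was passed.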

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 include/linux/fault-inject.h |  2 ++
 lib/fault-inject.c           |  3 +++
 mm/failslab.c                |  3 +++
 mm/internal.h                | 11 +++++++++++
 mm/page_alloc.c              | 22 ++++++++++++----------
 5 files changed, 31 insertions(+), 10 deletions(-)

Comments

Andrew Morton May 10, 2022, 6:59 p.m. UTC | #1
On Tue, 10 May 2022 19:38:08 +0800 Qi Zheng <zhengqi.arch@bytedance.com> wrote:

> We expect no warnings to be issued when we specify __GFP_NOWARN, but
> currently, paths like alloc_pages() and kmalloc() still print some
> warnings. Fix this.

Looks sane to me.

> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -35,6 +35,17 @@ struct folio_batch;
>  /* Do not use these with a slab allocator */
>  #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
>  
> +#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
> +	static bool __section(".data.once") __warned;			\
> +	int __ret_warn_once = !!(cond);					\
> +									\
> +	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
> +		__warned = true;					\
> +		WARN_ON(1);						\
> +	}								\
> +	unlikely(__ret_warn_once);					\
> +})

I don't think WARN_ON_ONCE_GFP is a good name for this.  But
WARN_ON_ONCE_IF_NOT_GFP_NOWARN is too long :(

WARN_ON_ONCE_NOWARN might be better.  No strong opinion here, really.

> @@ -4902,8 +4906,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  	 * We also sanity check to catch abuse of atomic reserves being used by
>  	 * callers that are not in atomic context.
>  	 */
> -	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
> -				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
> +	if (WARN_ON_ONCE_GFP((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
> +				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM), gfp_mask))
>  		gfp_mask &= ~__GFP_ATOMIC;
>  
>  retry_cpuset:

I dropped this hunk - Neil's "mm: discard __GFP_ATOMIC"
(https://lkml.kernel.org/r/163712397076.13692.4727608274002939094@noble.neil.brown.name)
deleted this code.
Qi Zheng May 11, 2022, 2:19 a.m. UTC | #2
On 2022/5/11 2:59 AM, Andrew Morton wrote:
> On Tue, 10 May 2022 19:38:08 +0800 Qi Zheng <zhengqi.arch@bytedance.com> wrote:
> 
>> We expect no warnings to be issued when we specify __GFP_NOWARN, but
>> currently, paths like alloc_pages() and kmalloc() still print some
>> warnings. Fix this.
> 
> Looks sane to me.
> 
>> --- a/mm/internal.h
>> +++ b/mm/internal.h
>> @@ -35,6 +35,17 @@ struct folio_batch;
>>   /* Do not use these with a slab allocator */
>>   #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
>>   
>> +#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
>> +	static bool __section(".data.once") __warned;			\
>> +	int __ret_warn_once = !!(cond);					\
>> +									\
>> +	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
>> +		__warned = true;					\
>> +		WARN_ON(1);						\
>> +	}								\
>> +	unlikely(__ret_warn_once);					\
>> +})
> 
> I don't think WARN_ON_ONCE_GFP is a good name for this.  But
> WARN_ON_ONCE_IF_NOT_GFP_NOWARN is too long :(
> 
> WARN_ON_ONCE_NOWARN might be better.  No strong opinion here, really.

I've thought about WARN_ON_ONCE_NOWARN, but I feel a little weird
putting 'WARN' and 'NOWARN' together. How about WARN_ON_ONCE_IF_ALLOWED?

> 
>> @@ -4902,8 +4906,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>>   	 * We also sanity check to catch abuse of atomic reserves being used by
>>   	 * callers that are not in atomic context.
>>   	 */
>> -	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
>> -				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
>> +	if (WARN_ON_ONCE_GFP((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
>> +				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM), gfp_mask))
>>   		gfp_mask &= ~__GFP_ATOMIC;
>>   
>>   retry_cpuset:
> 
> I dropped this hunk - Neil's "mm: discard __GFP_ATOMIC"
> (https://lkml.kernel.org/r/163712397076.13692.4727608274002939094@noble.neil.brown.name)
> deleted this code.
> 

This series is based on v5.18-rc5. I will rebase it onto the latest
linux-next branch and check for any WARN_ON_ONCEs that are still not
handled.

Thanks,
Qi
Andrew Morton May 11, 2022, 2:32 a.m. UTC | #3
On Wed, 11 May 2022 10:19:48 +0800 Qi Zheng <zhengqi.arch@bytedance.com> wrote:

> 
> ...
> >> --- a/mm/internal.h
> >> +++ b/mm/internal.h
> >> @@ -35,6 +35,17 @@ struct folio_batch;
> >>   /* Do not use these with a slab allocator */
> >>   #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
> >>   
> >> +#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
> >> +	static bool __section(".data.once") __warned;			\
> >> +	int __ret_warn_once = !!(cond);					\
> >> +									\
> >> +	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
> >> +		__warned = true;					\
> >> +		WARN_ON(1);						\
> >> +	}								\
> >> +	unlikely(__ret_warn_once);					\
> >> +})
> > 
> > I don't think WARN_ON_ONCE_GFP is a good name for this.  But
> > WARN_ON_ONCE_IF_NOT_GFP_NOWARN is too long :(
> > 
> > WARN_ON_ONCE_NOWARN might be better.  No strong opinion here, really.
> 
>> I've thought about WARN_ON_ONCE_NOWARN, but I feel a little weird
>> putting 'WARN' and 'NOWARN' together. How about WARN_ON_ONCE_IF_ALLOWED?

I dunno.  WARN_ON_ONCE_GFP isn't too bad I suppose.  Add a comment over
the definition explaining it?
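
Something like this above the #define, perhaps (wording is only a
sketch):

        /*
         * Like WARN_ON_ONCE(), but the warning is suppressed if the caller
         * passed __GFP_NOWARN in @gfp.  The condition is still evaluated
         * and its value returned, so it can be used directly in an if ().
         */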

> > 
> >> @@ -4902,8 +4906,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
> >>   	 * We also sanity check to catch abuse of atomic reserves being used by
> >>   	 * callers that are not in atomic context.
> >>   	 */
> >> -	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
> >> -				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
> >> +	if (WARN_ON_ONCE_GFP((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
> >> +				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM), gfp_mask))
> >>   		gfp_mask &= ~__GFP_ATOMIC;
> >>   
> >>   retry_cpuset:
> > 
> > I dropped this hunk - Neil's "mm: discard __GFP_ATOMIC"
> > (https://lkml.kernel.org/r/163712397076.13692.4727608274002939094@noble.neil.brown.name)
> > deleted this code.
> > 
> 
> This series is based on v5.18-rc5. I will rebase it onto the latest
> linux-next branch and check for any WARN_ON_ONCEs that are still not
> handled.

Against git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm branch
mm-unstable, please.  That ends up in linux-next, with a delay.
Qi Zheng May 11, 2022, 2:36 a.m. UTC | #4
On 2022/5/11 10:32 AM, Andrew Morton wrote:
> On Wed, 11 May 2022 10:19:48 +0800 Qi Zheng <zhengqi.arch@bytedance.com> wrote:
> 
>>
>> ...
>>>> --- a/mm/internal.h
>>>> +++ b/mm/internal.h
>>>> @@ -35,6 +35,17 @@ struct folio_batch;
>>>>    /* Do not use these with a slab allocator */
>>>>    #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
>>>>    
>>>> +#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
>>>> +	static bool __section(".data.once") __warned;			\
>>>> +	int __ret_warn_once = !!(cond);					\
>>>> +									\
>>>> +	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
>>>> +		__warned = true;					\
>>>> +		WARN_ON(1);						\
>>>> +	}								\
>>>> +	unlikely(__ret_warn_once);					\
>>>> +})
>>>
>>> I don't think WARN_ON_ONCE_GFP is a good name for this.  But
>>> WARN_ON_ONCE_IF_NOT_GFP_NOWARN is too long :(
>>>
>>> WARN_ON_ONCE_NOWARN might be better.  No strong opinion here, really.
>>
>> I've thought about WARN_ON_ONCE_NOWARN, but I feel a little weird
>> putting 'WARN' and 'NOWARN' together. How about WARN_ON_ONCE_IF_ALLOWED?
> 
> I dunno.  WARN_ON_ONCE_GFP isn't too bad I suppose.  Add a comment over
> the definition explaining it?

OK, I will add a comment to it.

> 
>>>
>>>> @@ -4902,8 +4906,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>>>>    	 * We also sanity check to catch abuse of atomic reserves being used by
>>>>    	 * callers that are not in atomic context.
>>>>    	 */
>>>> -	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
>>>> -				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
>>>> +	if (WARN_ON_ONCE_GFP((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
>>>> +				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM), gfp_mask))
>>>>    		gfp_mask &= ~__GFP_ATOMIC;
>>>>    
>>>>    retry_cpuset:
>>>
>>> I dropped this hunk - Neil's "mm: discard __GFP_ATOMIC"
>>> (https://lkml.kernel.org/r/163712397076.13692.4727608274002939094@noble.neil.brown.name)
>>> deleted this code.
>>>
>>
>> This series is based on v5.18-rc5. I will rebase it onto the latest
>> linux-next branch and check for any WARN_ON_ONCEs that are still not
>> handled.
> 
> Against git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm branch
> mm-unstable, please.  That ends up in linux-next, with a delay.

OK, will do.
Qi Zheng May 11, 2022, 5:12 a.m. UTC | #5
On 2022/5/10 7:38 PM, Qi Zheng wrote:
> We expect no warnings to be issued when we specify __GFP_NOWARN, but
> currently, paths like alloc_pages() and kmalloc() still print some
> warnings. Fix this.

Hi Andrew,

Maybe we only need to handle memory allocation failures, such as
should_fail() (this leads to the deadlock in PATCH[2/2]). These
WARN_ON_ONCE()s report usage problems and should not normally be
printed; if they are printed, we should fix those usage problems.
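
For example (hypothetical caller, not from this series), something like

        /* order > 1 together with __GFP_NOFAIL is a caller bug */
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOFAIL |
                                        __GFP_NOWARN, 2);

trips the usage check in rmqueue() and arguably should keep warning
even with __GFP_NOWARN, because the caller itself needs to be fixed.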

Thanks,
Qi


> 
> Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
> ---
>   include/linux/fault-inject.h |  2 ++
>   lib/fault-inject.c           |  3 +++
>   mm/failslab.c                |  3 +++
>   mm/internal.h                | 11 +++++++++++
>   mm/page_alloc.c              | 22 ++++++++++++----------
>   5 files changed, 31 insertions(+), 10 deletions(-)
> 
> diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
> index 2d04f6448cde..9f6e25467844 100644
> --- a/include/linux/fault-inject.h
> +++ b/include/linux/fault-inject.h
> @@ -20,6 +20,7 @@ struct fault_attr {
>   	atomic_t space;
>   	unsigned long verbose;
>   	bool task_filter;
> +	bool no_warn;
>   	unsigned long stacktrace_depth;
>   	unsigned long require_start;
>   	unsigned long require_end;
> @@ -39,6 +40,7 @@ struct fault_attr {
>   		.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,	\
>   		.verbose = 2,						\
>   		.dname = NULL,						\
> +		.no_warn = false,					\
>   	}
>   
>   #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
> diff --git a/lib/fault-inject.c b/lib/fault-inject.c
> index ce12621b4275..423784d9c058 100644
> --- a/lib/fault-inject.c
> +++ b/lib/fault-inject.c
> @@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
>   
>   static void fail_dump(struct fault_attr *attr)
>   {
> +	if (attr->no_warn)
> +		return;
> +
>   	if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
>   		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
>   		       "name %pd, interval %lu, probability %lu, "
> diff --git a/mm/failslab.c b/mm/failslab.c
> index f92fed91ac23..58df9789f1d2 100644
> --- a/mm/failslab.c
> +++ b/mm/failslab.c
> @@ -30,6 +30,9 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
>   	if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
>   		return false;
>   
> +	if (gfpflags & __GFP_NOWARN)
> +		failslab.attr.no_warn = true;
> +
>   	return should_fail(&failslab.attr, s->object_size);
>   }
>   
> diff --git a/mm/internal.h b/mm/internal.h
> index cf16280ce132..7a268fac6559 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -35,6 +35,17 @@ struct folio_batch;
>   /* Do not use these with a slab allocator */
>   #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
>   
> +#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
> +	static bool __section(".data.once") __warned;			\
> +	int __ret_warn_once = !!(cond);					\
> +									\
> +	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
> +		__warned = true;					\
> +		WARN_ON(1);						\
> +	}								\
> +	unlikely(__ret_warn_once);					\
> +})
> +
>   void page_writeback_init(void);
>   
>   static inline void *folio_raw_mapping(struct folio *folio)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 0e42038382c1..2bf4ce4d0e2f 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3722,7 +3722,7 @@ struct page *rmqueue(struct zone *preferred_zone,
>   	 * We most definitely don't want callers attempting to
>   	 * allocate greater than order-1 page units with __GFP_NOFAIL.
>   	 */
> -	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
> +	WARN_ON_ONCE_GFP((gfp_flags & __GFP_NOFAIL) && (order > 1), gfp_flags);
>   
>   	do {
>   		page = NULL;
> @@ -3799,6 +3799,9 @@ static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
>   			(gfp_mask & __GFP_DIRECT_RECLAIM))
>   		return false;
>   
> +	if (gfp_mask & __GFP_NOWARN)
> +		fail_page_alloc.attr.no_warn = true;
> +
>   	return should_fail(&fail_page_alloc.attr, 1 << order);
>   }
>   
> @@ -4346,7 +4349,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
>   	 */
>   
>   	/* Exhausted what can be done so it's blame time */
> -	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
> +	if (out_of_memory(&oc) ||
> +	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
>   		*did_some_progress = 1;
>   
>   		/*
> @@ -4902,8 +4906,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>   	 * We also sanity check to catch abuse of atomic reserves being used by
>   	 * callers that are not in atomic context.
>   	 */
> -	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
> -				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
> +	if (WARN_ON_ONCE_GFP((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
> +				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM), gfp_mask))
>   		gfp_mask &= ~__GFP_ATOMIC;
>   
>   retry_cpuset:
> @@ -5117,7 +5121,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>   		 * All existing users of the __GFP_NOFAIL are blockable, so warn
>   		 * of any new users that actually require GFP_NOWAIT
>   		 */
> -		if (WARN_ON_ONCE(!can_direct_reclaim))
> +		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
>   			goto fail;
>   
>   		/*
> @@ -5125,7 +5129,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>   		 * because we cannot reclaim anything and only can loop waiting
>   		 * for somebody to do a work for us
>   		 */
> -		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
> +		WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
>   
>   		/*
>   		 * non failing costly orders are a hard requirement which we
> @@ -5133,7 +5137,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>   		 * so that we can identify them and convert them to something
>   		 * else.
>   		 */
> -		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
> +		WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
>   
>   		/*
>   		 * Help non-failing allocations by giving them access to memory
> @@ -5379,10 +5383,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
>   	 * There are several places where we assume that the order value is sane
>   	 * so bail out early if the request is out of bound.
>   	 */
> -	if (unlikely(order >= MAX_ORDER)) {
> -		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
> +	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
>   		return NULL;
> -	}
>   
>   	gfp &= gfp_allowed_mask;
>   	/*
Qi Zheng May 11, 2022, 5:28 a.m. UTC | #6
On 2022/5/11 1:12 PM, Qi Zheng wrote:
> 
> 
> On 2022/5/10 7:38 PM, Qi Zheng wrote:
>> We expect no warnings to be issued when we specify __GFP_NOWARN, but
>> currently, paths like alloc_pages() and kmalloc() still print some
>> warnings. Fix this.
> 
> Hi Andrew,
> 
> Maybe we only need to handle memory allocation failures, such as
> should_fail() (this leads to the deadlock in PATCH[2/2]). These
> WARN_ON_ONCE()s report usage problems and should not normally be printed; if they

such as WARN_ON_ONCE_GFP((gfp_flags & __GFP_NOFAIL) && (order > 1),
gfp_flags).

> are printed, we should fix those usage problems.
> 
> Thanks,
> Qi
> 
> 
>>
>> Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
>> ---
>>   include/linux/fault-inject.h |  2 ++
>>   lib/fault-inject.c           |  3 +++
>>   mm/failslab.c                |  3 +++
>>   mm/internal.h                | 11 +++++++++++
>>   mm/page_alloc.c              | 22 ++++++++++++----------
>>   5 files changed, 31 insertions(+), 10 deletions(-)
>>
>> diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
>> index 2d04f6448cde..9f6e25467844 100644
>> --- a/include/linux/fault-inject.h
>> +++ b/include/linux/fault-inject.h
>> @@ -20,6 +20,7 @@ struct fault_attr {
>>       atomic_t space;
>>       unsigned long verbose;
>>       bool task_filter;
>> +    bool no_warn;
>>       unsigned long stacktrace_depth;
>>       unsigned long require_start;
>>       unsigned long require_end;
>> @@ -39,6 +40,7 @@ struct fault_attr {
>>           .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,    \
>>           .verbose = 2,                        \
>>           .dname = NULL,                        \
>> +        .no_warn = false,                    \
>>       }
>>   #define DECLARE_FAULT_ATTR(name) struct fault_attr name = 
>> FAULT_ATTR_INITIALIZER
>> diff --git a/lib/fault-inject.c b/lib/fault-inject.c
>> index ce12621b4275..423784d9c058 100644
>> --- a/lib/fault-inject.c
>> +++ b/lib/fault-inject.c
>> @@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
>>   static void fail_dump(struct fault_attr *attr)
>>   {
>> +    if (attr->no_warn)
>> +        return;
>> +
>>       if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
>>           printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
>>                  "name %pd, interval %lu, probability %lu, "
>> diff --git a/mm/failslab.c b/mm/failslab.c
>> index f92fed91ac23..58df9789f1d2 100644
>> --- a/mm/failslab.c
>> +++ b/mm/failslab.c
>> @@ -30,6 +30,9 @@ bool __should_failslab(struct kmem_cache *s, gfp_t 
>> gfpflags)
>>       if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
>>           return false;
>> +    if (gfpflags & __GFP_NOWARN)
>> +        failslab.attr.no_warn = true;
>> +
>>       return should_fail(&failslab.attr, s->object_size);
>>   }
>> diff --git a/mm/internal.h b/mm/internal.h
>> index cf16280ce132..7a268fac6559 100644
>> --- a/mm/internal.h
>> +++ b/mm/internal.h
>> @@ -35,6 +35,17 @@ struct folio_batch;
>>   /* Do not use these with a slab allocator */
>>   #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
>> +#define WARN_ON_ONCE_GFP(cond, gfp)    ({                \
>> +    static bool __section(".data.once") __warned;            \
>> +    int __ret_warn_once = !!(cond);                    \
>> +                                    \
>> +    if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && 
>> !__warned)) { \
>> +        __warned = true;                    \
>> +        WARN_ON(1);                        \
>> +    }                                \
>> +    unlikely(__ret_warn_once);                    \
>> +})
>> +
>>   void page_writeback_init(void);
>>   static inline void *folio_raw_mapping(struct folio *folio)
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index 0e42038382c1..2bf4ce4d0e2f 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -3722,7 +3722,7 @@ struct page *rmqueue(struct zone *preferred_zone,
>>        * We most definitely don't want callers attempting to
>>        * allocate greater than order-1 page units with __GFP_NOFAIL.
>>        */
>> -    WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
>> +    WARN_ON_ONCE_GFP((gfp_flags & __GFP_NOFAIL) && (order > 1), 
>> gfp_flags);
>>       do {
>>           page = NULL;
>> @@ -3799,6 +3799,9 @@ static bool __should_fail_alloc_page(gfp_t 
>> gfp_mask, unsigned int order)
>>               (gfp_mask & __GFP_DIRECT_RECLAIM))
>>           return false;
>> +    if (gfp_mask & __GFP_NOWARN)
>> +        fail_page_alloc.attr.no_warn = true;
>> +
>>       return should_fail(&fail_page_alloc.attr, 1 << order);
>>   }
>> @@ -4346,7 +4349,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned 
>> int order,
>>        */
>>       /* Exhausted what can be done so it's blame time */
>> -    if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
>> +    if (out_of_memory(&oc) ||
>> +        WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
>>           *did_some_progress = 1;
>>           /*
>> @@ -4902,8 +4906,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned 
>> int order,
>>        * We also sanity check to catch abuse of atomic reserves being 
>> used by
>>        * callers that are not in atomic context.
>>        */
>> -    if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
>> -                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
>> +    if (WARN_ON_ONCE_GFP((gfp_mask & 
>> (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
>> +                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM), gfp_mask))
>>           gfp_mask &= ~__GFP_ATOMIC;
>>   retry_cpuset:
>> @@ -5117,7 +5121,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned 
>> int order,
>>            * All existing users of the __GFP_NOFAIL are blockable, so 
>> warn
>>            * of any new users that actually require GFP_NOWAIT
>>            */
>> -        if (WARN_ON_ONCE(!can_direct_reclaim))
>> +        if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
>>               goto fail;
>>           /*
>> @@ -5125,7 +5129,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned 
>> int order,
>>            * because we cannot reclaim anything and only can loop waiting
>>            * for somebody to do a work for us
>>            */
>> -        WARN_ON_ONCE(current->flags & PF_MEMALLOC);
>> +        WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
>>           /*
>>            * non failing costly orders are a hard requirement which we
>> @@ -5133,7 +5137,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned 
>> int order,
>>            * so that we can identify them and convert them to something
>>            * else.
>>            */
>> -        WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
>> +        WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
>>           /*
>>            * Help non-failing allocations by giving them access to memory
>> @@ -5379,10 +5383,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned 
>> int order, int preferred_nid,
>>        * There are several places where we assume that the order value 
>> is sane
>>        * so bail out early if the request is out of bound.
>>        */
>> -    if (unlikely(order >= MAX_ORDER)) {
>> -        WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
>> +    if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
>>           return NULL;
>> -    }
>>       gfp &= gfp_allowed_mask;
>>       /*
>

Patch

diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 2d04f6448cde..9f6e25467844 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -20,6 +20,7 @@  struct fault_attr {
 	atomic_t space;
 	unsigned long verbose;
 	bool task_filter;
+	bool no_warn;
 	unsigned long stacktrace_depth;
 	unsigned long require_start;
 	unsigned long require_end;
@@ -39,6 +40,7 @@  struct fault_attr {
 		.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,	\
 		.verbose = 2,						\
 		.dname = NULL,						\
+		.no_warn = false,					\
 	}
 
 #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index ce12621b4275..423784d9c058 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -41,6 +41,9 @@  EXPORT_SYMBOL_GPL(setup_fault_attr);
 
 static void fail_dump(struct fault_attr *attr)
 {
+	if (attr->no_warn)
+		return;
+
 	if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
 		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
 		       "name %pd, interval %lu, probability %lu, "
diff --git a/mm/failslab.c b/mm/failslab.c
index f92fed91ac23..58df9789f1d2 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -30,6 +30,9 @@  bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
 	if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
 		return false;
 
+	if (gfpflags & __GFP_NOWARN)
+		failslab.attr.no_warn = true;
+
 	return should_fail(&failslab.attr, s->object_size);
 }
 
diff --git a/mm/internal.h b/mm/internal.h
index cf16280ce132..7a268fac6559 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -35,6 +35,17 @@  struct folio_batch;
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
+#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
+	static bool __section(".data.once") __warned;			\
+	int __ret_warn_once = !!(cond);					\
+									\
+	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
+		__warned = true;					\
+		WARN_ON(1);						\
+	}								\
+	unlikely(__ret_warn_once);					\
+})
+
 void page_writeback_init(void);
 
 static inline void *folio_raw_mapping(struct folio *folio)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0e42038382c1..2bf4ce4d0e2f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3722,7 +3722,7 @@  struct page *rmqueue(struct zone *preferred_zone,
 	 * We most definitely don't want callers attempting to
 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
 	 */
-	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+	WARN_ON_ONCE_GFP((gfp_flags & __GFP_NOFAIL) && (order > 1), gfp_flags);
 
 	do {
 		page = NULL;
@@ -3799,6 +3799,9 @@  static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 			(gfp_mask & __GFP_DIRECT_RECLAIM))
 		return false;
 
+	if (gfp_mask & __GFP_NOWARN)
+		fail_page_alloc.attr.no_warn = true;
+
 	return should_fail(&fail_page_alloc.attr, 1 << order);
 }
 
@@ -4346,7 +4349,8 @@  __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	 */
 
 	/* Exhausted what can be done so it's blame time */
-	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
+	if (out_of_memory(&oc) ||
+	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
 		*did_some_progress = 1;
 
 		/*
@@ -4902,8 +4906,8 @@  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * We also sanity check to catch abuse of atomic reserves being used by
 	 * callers that are not in atomic context.
 	 */
-	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
-				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
+	if (WARN_ON_ONCE_GFP((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
+				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM), gfp_mask))
 		gfp_mask &= ~__GFP_ATOMIC;
 
 retry_cpuset:
@@ -5117,7 +5121,7 @@  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * All existing users of the __GFP_NOFAIL are blockable, so warn
 		 * of any new users that actually require GFP_NOWAIT
 		 */
-		if (WARN_ON_ONCE(!can_direct_reclaim))
+		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
 			goto fail;
 
 		/*
@@ -5125,7 +5129,7 @@  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * because we cannot reclaim anything and only can loop waiting
 		 * for somebody to do a work for us
 		 */
-		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+		WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
 
 		/*
 		 * non failing costly orders are a hard requirement which we
@@ -5133,7 +5137,7 @@  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * so that we can identify them and convert them to something
 		 * else.
 		 */
-		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
+		WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
 
 		/*
 		 * Help non-failing allocations by giving them access to memory
@@ -5379,10 +5383,8 @@  struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 	 * There are several places where we assume that the order value is sane
 	 * so bail out early if the request is out of bound.
 	 */
-	if (unlikely(order >= MAX_ORDER)) {
-		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
+	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
 		return NULL;
-	}
 
 	gfp &= gfp_allowed_mask;
 	/*