
[v2,14/15] mm: Pass order to try_to_free_pages in GFP flags

Message ID 20190510135038.17129-15-willy@infradead.org (mailing list archive)
State New, archived
Series: Remove 'order' argument from many mm functions

Commit Message

Matthew Wilcox May 10, 2019, 1:50 p.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Also remove the order argument from __perform_reclaim() and
__alloc_pages_direct_reclaim(), which only passed the argument down.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/swap.h          |  2 +-
 include/trace/events/vmscan.h | 20 +++++++++-----------
 mm/page_alloc.c               | 15 ++++++---------
 mm/vmscan.c                   | 13 ++++++-------
 4 files changed, 22 insertions(+), 28 deletions(-)
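
The order reaches try_to_free_pages() folded into the gfp mask and is
recovered there with gfp_order(), a helper added earlier in this series.
A minimal sketch of the idea (the shift value and the __GFP_ORDER() macro
below are illustrative assumptions, not the series' actual bit layout):

/* Illustrative only: assumed encoding of the allocation order in gfp_t. */
#define __GFP_ORDER_SHIFT	26
#define __GFP_ORDER(order)	((__force gfp_t)((order) << __GFP_ORDER_SHIFT))

/* Recover the order that a caller folded into the mask. */
static inline unsigned int gfp_order(gfp_t gfp)
{
	return (__force unsigned int)gfp >> __GFP_ORDER_SHIFT;
}

With an encoding along these lines, the allocator folds the requested order
into gfp_mask before entering the slow path, so try_to_free_pages() and the
vmscan tracepoints can derive it from the flags (e.g.
gfp_order(GFP_KERNEL | __GFP_ORDER(3)) == 3) instead of carrying a separate
parameter.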

Comments

Ira Weiny May 10, 2019, 11:26 p.m. UTC | #1
On Fri, May 10, 2019 at 06:50:37AM -0700, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> Also remove the order argument from __perform_reclaim() and
> __alloc_pages_direct_reclaim(), which only passed the argument down.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  include/linux/swap.h          |  2 +-
>  include/trace/events/vmscan.h | 20 +++++++++-----------
>  mm/page_alloc.c               | 15 ++++++---------
>  mm/vmscan.c                   | 13 ++++++-------
>  4 files changed, 22 insertions(+), 28 deletions(-)
> 
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 4bfb5c4ac108..029737fec38b 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -348,7 +348,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
>  
>  /* linux/mm/vmscan.c */
>  extern unsigned long zone_reclaimable_pages(struct zone *zone);
> -extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> +extern unsigned long try_to_free_pages(struct zonelist *zonelist,
>  					gfp_t gfp_mask, nodemask_t *mask);
>  extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
>  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
> index a5ab2973e8dc..a6b1b20333b4 100644
> --- a/include/trace/events/vmscan.h
> +++ b/include/trace/events/vmscan.h
> @@ -100,45 +100,43 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
>  
>  DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
>  
> -	TP_PROTO(int order, gfp_t gfp_flags),
> +	TP_PROTO(gfp_t gfp_flags),
>  
> -	TP_ARGS(order, gfp_flags),
> +	TP_ARGS(gfp_flags),
>  
>  	TP_STRUCT__entry(
> -		__field(	int,	order		)
>  		__field(	gfp_t,	gfp_flags	)
>  	),
>  
>  	TP_fast_assign(
> -		__entry->order		= order;
>  		__entry->gfp_flags	= gfp_flags;
>  	),
>  
>  	TP_printk("order=%d gfp_flags=%s",
> -		__entry->order,
> +		gfp_order(__entry->gfp_flags),
>  		show_gfp_flags(__entry->gfp_flags))
>  );
>  
>  DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
>  
> -	TP_PROTO(int order, gfp_t gfp_flags),
> +	TP_PROTO(gfp_t gfp_flags),
>  
> -	TP_ARGS(order, gfp_flags)
> +	TP_ARGS(gfp_flags)
>  );
>  
>  #ifdef CONFIG_MEMCG
>  DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
>  
> -	TP_PROTO(int order, gfp_t gfp_flags),
> +	TP_PROTO(gfp_t gfp_flags),
>  
> -	TP_ARGS(order, gfp_flags)
> +	TP_ARGS(gfp_flags)
>  );
>  
>  DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
>  
> -	TP_PROTO(int order, gfp_t gfp_flags),
> +	TP_PROTO(gfp_t gfp_flags),
>  
> -	TP_ARGS(order, gfp_flags)
> +	TP_ARGS(gfp_flags)
>  );
>  #endif /* CONFIG_MEMCG */
>  
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index d457dfa8a0ac..29daaf4ae4fb 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4024,9 +4024,7 @@ EXPORT_SYMBOL_GPL(fs_reclaim_release);
>  #endif
>  
>  /* Perform direct synchronous page reclaim */
> -static int
> -__perform_reclaim(gfp_t gfp_mask, unsigned int order,
> -					const struct alloc_context *ac)
> +static int __perform_reclaim(gfp_t gfp_mask, const struct alloc_context *ac)
>  {
>  	struct reclaim_state reclaim_state;
>  	int progress;
> @@ -4043,8 +4041,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
>  	reclaim_state.reclaimed_slab = 0;
>  	current->reclaim_state = &reclaim_state;
>  
> -	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
> -								ac->nodemask);
> +	progress = try_to_free_pages(ac->zonelist, gfp_mask, ac->nodemask);
>  
>  	current->reclaim_state = NULL;
>  	memalloc_noreclaim_restore(noreclaim_flag);
> @@ -4058,14 +4055,14 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
>  
>  /* The really slow allocator path where we enter direct reclaim */
>  static inline struct page *
> -__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
> -		unsigned int alloc_flags, const struct alloc_context *ac,
> +__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int alloc_flags,
> +		const struct alloc_context *ac,
>  		unsigned long *did_some_progress)
>  {
>  	struct page *page = NULL;
>  	bool drained = false;
>  
> -	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
> +	*did_some_progress = __perform_reclaim(gfp_mask, ac);
>  	if (unlikely(!(*did_some_progress)))
>  		return NULL;
>  
> @@ -4458,7 +4455,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  		goto nopage;
>  
>  	/* Try direct reclaim and then allocating */
> -	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
> +	page = __alloc_pages_direct_reclaim(gfp_mask, alloc_flags, ac,
>  							&did_some_progress);
>  	if (page)
>  		goto got_pg;
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index d9c3e873eca6..e4d4d9c1d7a9 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3182,15 +3182,15 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
>  	return false;
>  }
>  
> -unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> -				gfp_t gfp_mask, nodemask_t *nodemask)
> +unsigned long try_to_free_pages(struct zonelist *zonelist, gfp_t gfp_mask,
> +		nodemask_t *nodemask)
>  {
>  	unsigned long nr_reclaimed;
>  	struct scan_control sc = {
>  		.nr_to_reclaim = SWAP_CLUSTER_MAX,
>  		.gfp_mask = current_gfp_context(gfp_mask),
>  		.reclaim_idx = gfp_zone(gfp_mask),
> -		.order = order,
> +		.order = gfp_order(gfp_mask),

NIT: Could we remove order from scan_control?

Ira
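
For illustration of the NIT (a hypothetical sketch, not part of this patch):
dropping the order field would mean deriving the order from the gfp mask
wherever sc->order is read today, for example through a small helper:

/* Hypothetical helper; assumes sc->gfp_mask always carries the order. */
static inline unsigned int sc_order(const struct scan_control *sc)
{
	return gfp_order(sc->gfp_mask);
}

The catch is that not every scan_control is built from an allocation's gfp
mask; kswapd, for instance, reclaims at an order supplied via the wakeup
path while using a plain GFP_KERNEL mask, so that order would also have to
be folded into sc->gfp_mask.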


Patch

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4bfb5c4ac108..029737fec38b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -348,7 +348,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
-extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist,
 					gfp_t gfp_mask, nodemask_t *mask);
 extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index a5ab2973e8dc..a6b1b20333b4 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -100,45 +100,43 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
 
 DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
 
-	TP_PROTO(int order, gfp_t gfp_flags),
+	TP_PROTO(gfp_t gfp_flags),
 
-	TP_ARGS(order, gfp_flags),
+	TP_ARGS(gfp_flags),
 
 	TP_STRUCT__entry(
-		__field(	int,	order		)
 		__field(	gfp_t,	gfp_flags	)
 	),
 
 	TP_fast_assign(
-		__entry->order		= order;
 		__entry->gfp_flags	= gfp_flags;
 	),
 
 	TP_printk("order=%d gfp_flags=%s",
-		__entry->order,
+		gfp_order(__entry->gfp_flags),
 		show_gfp_flags(__entry->gfp_flags))
 );
 
 DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
 
-	TP_PROTO(int order, gfp_t gfp_flags),
+	TP_PROTO(gfp_t gfp_flags),
 
-	TP_ARGS(order, gfp_flags)
+	TP_ARGS(gfp_flags)
 );
 
 #ifdef CONFIG_MEMCG
 DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
 
-	TP_PROTO(int order, gfp_t gfp_flags),
+	TP_PROTO(gfp_t gfp_flags),
 
-	TP_ARGS(order, gfp_flags)
+	TP_ARGS(gfp_flags)
 );
 
 DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
 
-	TP_PROTO(int order, gfp_t gfp_flags),
+	TP_PROTO(gfp_t gfp_flags),
 
-	TP_ARGS(order, gfp_flags)
+	TP_ARGS(gfp_flags)
 );
 #endif /* CONFIG_MEMCG */
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d457dfa8a0ac..29daaf4ae4fb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4024,9 +4024,7 @@ EXPORT_SYMBOL_GPL(fs_reclaim_release);
 #endif
 
 /* Perform direct synchronous page reclaim */
-static int
-__perform_reclaim(gfp_t gfp_mask, unsigned int order,
-					const struct alloc_context *ac)
+static int __perform_reclaim(gfp_t gfp_mask, const struct alloc_context *ac)
 {
 	struct reclaim_state reclaim_state;
 	int progress;
@@ -4043,8 +4041,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 	reclaim_state.reclaimed_slab = 0;
 	current->reclaim_state = &reclaim_state;
 
-	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
-								ac->nodemask);
+	progress = try_to_free_pages(ac->zonelist, gfp_mask, ac->nodemask);
 
 	current->reclaim_state = NULL;
 	memalloc_noreclaim_restore(noreclaim_flag);
@@ -4058,14 +4055,14 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 
 /* The really slow allocator path where we enter direct reclaim */
 static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-		unsigned int alloc_flags, const struct alloc_context *ac,
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int alloc_flags,
+		const struct alloc_context *ac,
 		unsigned long *did_some_progress)
 {
 	struct page *page = NULL;
 	bool drained = false;
 
-	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
+	*did_some_progress = __perform_reclaim(gfp_mask, ac);
 	if (unlikely(!(*did_some_progress)))
 		return NULL;
 
@@ -4458,7 +4455,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
-	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
+	page = __alloc_pages_direct_reclaim(gfp_mask, alloc_flags, ac,
 							&did_some_progress);
 	if (page)
 		goto got_pg;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d9c3e873eca6..e4d4d9c1d7a9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3182,15 +3182,15 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	return false;
 }
 
-unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-				gfp_t gfp_mask, nodemask_t *nodemask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, gfp_t gfp_mask,
+		nodemask_t *nodemask)
 {
 	unsigned long nr_reclaimed;
 	struct scan_control sc = {
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.gfp_mask = current_gfp_context(gfp_mask),
 		.reclaim_idx = gfp_zone(gfp_mask),
-		.order = order,
+		.order = gfp_order(gfp_mask),
 		.nodemask = nodemask,
 		.priority = DEF_PRIORITY,
 		.may_writepage = !laptop_mode,
@@ -3215,7 +3215,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
 		return 1;
 
-	trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
+	trace_mm_vmscan_direct_reclaim_begin(sc.gfp_mask);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
@@ -3244,8 +3244,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
-	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
-						      sc.gfp_mask);
+	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.gfp_mask);
 
 	/*
 	 * NOTE: Although we can get the priority field, using it
@@ -3294,7 +3293,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 
 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 
-	trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
+	trace_mm_vmscan_memcg_reclaim_begin(sc.gfp_mask);
 
 	psi_memstall_enter(&pflags);
 	noreclaim_flag = memalloc_noreclaim_save();