Message ID | 20240709031953.22217-1-richard.weiyang@gmail.com (mailing list archive)
---|---
State | New
Series | [v2] mm: use zonelist_zone() to get zone
On 7/9/2024 8:49 AM, Wei Yang wrote:
> Instead of accessing zoneref->zone directly, use zonelist_zone() like
> other places for consistency.
>
> No functional change.
>
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> CC: Mike Rapoport (IBM) <rppt@kernel.org>
> CC: David Hildenbrand <david@redhat.com>
> CC: Garg Shivank <shivankg@amd.com>
>
> ---
> v2: cover more usage
> ---
>  include/linux/mmzone.h     |  4 ++--
>  include/trace/events/oom.h |  2 +-
>  mm/mempolicy.c             |  4 ++--
>  mm/page_alloc.c            | 14 +++++++-------
>  4 files changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index cb7f265c2b96..51bce636373f 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1690,7 +1690,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
>  			zone = zonelist_zone(z))
>
>  #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
> -	for (zone = z->zone;						\
> +	for (zone = zonelist_zone(z);					\
>  		zone;							\
>  		z = next_zones_zonelist(++z, highidx, nodemask),	\
>  			zone = zonelist_zone(z))
> @@ -1726,7 +1726,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
>  	nid = first_node(*nodes);
>  	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
>  	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
> -	return (!z->zone) ? true : false;
> +	return (!zonelist_zone(z)) ? true : false;
>  }
>
>
> diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
> index a42be4c8563b..fe6997886b77 100644
> --- a/include/trace/events/oom.h
> +++ b/include/trace/events/oom.h
> @@ -55,7 +55,7 @@ TRACE_EVENT(reclaim_retry_zone,
>  	),
>
>  	TP_fast_assign(
> -		__entry->node = zone_to_nid(zoneref->zone);
> +		__entry->node = zone_to_nid(zonelist_zone(zoneref));
>  		__entry->zone_idx = zoneref->zone_idx;
>  		__entry->order = order;
>  		__entry->reclaimable = reclaimable;
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index f73acb01ad45..83e26ded6278 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
>  		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
>  		z = first_zones_zonelist(zonelist, highest_zoneidx,
>  							&policy->nodes);
> -		return z->zone ? zone_to_nid(z->zone) : node;
> +		return zonelist_zone(z) ? zone_to_nid(zonelist_zone(z)) : node;
>  	}
>  	case MPOL_LOCAL:
>  		return node;
> @@ -2806,7 +2806,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
>  				node_zonelist(thisnid, GFP_HIGHUSER),
>  				gfp_zone(GFP_HIGHUSER),
>  				&pol->nodes);
> -		polnid = zone_to_nid(z->zone);
> +		polnid = zone_to_nid(zonelist_zone(z));
>  		break;
>
>  	default:
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 116ee33fd1ce..e2933885bb19 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4218,7 +4218,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  	 */
>  	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
>  					ac->highest_zoneidx, ac->nodemask);
> -	if (!ac->preferred_zoneref->zone)
> +	if (!zonelist_zone(ac->preferred_zoneref))
>  		goto nopage;
>
>  	/*
> @@ -4230,7 +4230,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  		struct zoneref *z = first_zones_zonelist(ac->zonelist,
>  					ac->highest_zoneidx,
>  					&cpuset_current_mems_allowed);
> -		if (!z->zone)
> +		if (!zonelist_zone(z))
>  			goto nopage;
>  	}
>
> @@ -4587,8 +4587,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>  			continue;
>  		}
>
> -		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
> -		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
> +		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
> +		    zone_to_nid(zone) != zone_to_nid(zonelist_zone(ac.preferred_zoneref))) {
>  			goto failed;
>  		}
>
> @@ -4647,7 +4647,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
>  	pcp_trylock_finish(UP_flags);
>
>  	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
> -	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
> +	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
>
>  out:
>  	return nr_populated;
> @@ -4705,7 +4705,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
>  	 * Forbid the first pass from falling back to types that fragment
>  	 * memory until all local zones are considered.
>  	 */
> -	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
> +	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
>
>  	/* First allocation attempt */
>  	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
> @@ -5310,7 +5310,7 @@ int local_memory_node(int node)
>  	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
>  				   gfp_zone(GFP_KERNEL),
>  				   NULL);
> -	return zone_to_nid(z->zone);
> +	return zone_to_nid(zonelist_zone(z));
>  }
>  #endif

Hi Wei,

I identified some additional locations where using zonelist_zone and its
related functions (zonelist_node_idx and zonelist_zone_idx) would improve
code consistency.

If it's alright with you, please append below changes to the patch with
my tags.

Co-Developed-by: Shivank Garg <shivankg@amd.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>

I have also tested this patch.

Thanks,
Shivank

---
 include/trace/events/oom.h |  4 ++--
 mm/mempolicy.c             |  4 ++--
 mm/mmzone.c                |  2 +-
 mm/page_alloc.c            | 12 ++++++------
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index fe6997886b77..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zonelist_zone(zoneref));
-		__entry->zone_idx = zoneref->zone_idx;
+		__entry->node = zonelist_node_idx(zoneref);
+		__entry->zone_idx = zonelist_zone_idx(zoneref);
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
 		__entry->available = available;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e000f19b3852..ec84a11df1cc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1953,7 +1953,7 @@ unsigned int mempolicy_slab_node(void)
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->nodes);
-		return zonelist_zone(z) ? zone_to_nid(zonelist_zone(z)) : node;
+		return zonelist_zone(z) ? zonelist_node_idx(z) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2802,7 +2802,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(zonelist_zone(z));
+		polnid = zonelist_node_idx(z);
 		break;
 
 	default:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index c01896eca736..f9baa8882fbf 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -66,7 +66,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
 		z++;
 	else
 		while (zonelist_zone_idx(z) > highest_zoneidx ||
-				(z->zone && !zref_in_nodemask(z, nodes)))
+				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
 			z++;
 
 	return z;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d841905fa260..e998ff6cbbff 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3336,7 +3336,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 		if (no_fallback && nr_online_nodes > 1 &&
-		    zone != ac->preferred_zoneref->zone) {
+		    zone != zonelist_zone(ac->preferred_zoneref)) {
 			int local_nid;
 
 			/*
 			 * If moving to a remote node, retry but allow
 			 * fragmenting fallbacks. Locality is more important
 			 * than fragmentation avoidance.
 			 */
-			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+			local_nid = zonelist_node_idx(ac->preferred_zoneref);
 			if (zone_to_nid(zone) != local_nid) {
 				alloc_flags &= ~ALLOC_NOFRAGMENT;
 				goto retry;
 			}
@@ -3397,7 +3397,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 				goto try_this_zone;
 
 			if (!node_reclaim_enabled() ||
-			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
+			    !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
 				continue;
 
 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
@@ -3419,7 +3419,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 try_this_zone:
-		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
+		page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -4560,7 +4560,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 		}
 
 		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
-		    zone_to_nid(zone) != zone_to_nid(zonelist_zone(ac.preferred_zoneref))) {
+		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
 			goto failed;
 		}
 
@@ -5282,7 +5282,7 @@ int local_memory_node(int node)
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				   gfp_zone(GFP_KERNEL),
 				   NULL);
-	return zone_to_nid(zonelist_zone(z));
+	return zonelist_node_idx(z);
 }
 #endif
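For reference, here is a minimal, self-contained sketch of the three accessors this thread keeps referring to. The helper bodies mirror their definitions in include/linux/mmzone.h; the structs and zone_to_nid() below are simplified stand-ins so the snippet compiles outside the kernel, so treat this as an illustration and verify against the tree you build.

```c
/*
 * Sketch of the zoneref accessors discussed above. The helper bodies
 * follow include/linux/mmzone.h; struct zone and zone_to_nid() are toy
 * stand-ins (the real zone_to_nid() also handles !CONFIG_NUMA).
 */
struct zone { int node; };

struct zoneref {
	struct zone *zone;	/* pointer to the actual zone */
	int zone_idx;		/* cached zone_idx(zoneref->zone) */
};

static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

/* zonelist_zone(z) is exactly z->zone, hence "no functional change". */
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

/* Folds zone_to_nid(zonelist_zone(z)) into a single named accessor. */
static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}
```

This is why the follow-up hunks above can replace `zone_to_nid(zonelist_zone(z))` with `zonelist_node_idx(z)` and `zoneref->zone_idx` with `zonelist_zone_idx(zoneref)` without changing behavior: each accessor is a trivial inline over the same fields.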
On Tue, 9 Jul 2024 16:49:47 +0530 "Garg, Shivank" <shivankg@amd.com> wrote:

> I identified some additional locations where using zonelist_zone
> and its related functions (zonelist_node_idx and zonelist_zone_idx) would
> improve code consistency.
>
> If it's alright with you, please append below changes to the patch with my tags.

Thanks. This patch is getting rather large. Can we please revisit
this after 6.11-rc1 is released?
On 7/10/2024 3:21 AM, Andrew Morton wrote:
> On Tue, 9 Jul 2024 16:49:47 +0530 "Garg, Shivank" <shivankg@amd.com> wrote:
>
>> I identified some additional locations where using zonelist_zone
>> and its related functions (zonelist_node_idx and zonelist_zone_idx) would
>> improve code consistency.
>>
>> If it's alright with you, please append below changes to the patch with my tags.
>
> Thanks. This patch is getting rather large. Can we please revisit
> this after 6.11-rc1 is released?

Sure, sounds good to me.

Thanks,
Shivank
Instead of accessing zoneref->zone directly, use zonelist_zone() like
other places for consistency.

No functional change.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
CC: Mike Rapoport (IBM) <rppt@kernel.org>
CC: David Hildenbrand <david@redhat.com>
CC: Garg Shivank <shivankg@amd.com>

---
v2: cover more usage
---
 include/linux/mmzone.h     |  4 ++--
 include/trace/events/oom.h |  2 +-
 mm/mempolicy.c             |  4 ++--
 mm/page_alloc.c            | 14 +++++++-------
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cb7f265c2b96..51bce636373f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1690,7 +1690,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 			zone = zonelist_zone(z))
 
 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
-	for (zone = z->zone;						\
+	for (zone = zonelist_zone(z);					\
 		zone;							\
 		z = next_zones_zonelist(++z, highidx, nodemask),	\
 			zone = zonelist_zone(z))
@@ -1726,7 +1726,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
 	nid = first_node(*nodes);
 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
-	return (!z->zone) ? true : false;
+	return (!zonelist_zone(z)) ? true : false;
 }
 
 
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..fe6997886b77 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,7 +55,7 @@ TRACE_EVENT(reclaim_retry_zone,
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zoneref->zone);
+		__entry->node = zone_to_nid(zonelist_zone(zoneref));
 		__entry->zone_idx = zoneref->zone_idx;
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f73acb01ad45..83e26ded6278 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->nodes);
-		return z->zone ? zone_to_nid(z->zone) : node;
+		return zonelist_zone(z) ? zone_to_nid(zonelist_zone(z)) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2806,7 +2806,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(z->zone);
+		polnid = zone_to_nid(zonelist_zone(z));
 		break;
 
 	default:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 116ee33fd1ce..e2933885bb19 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4218,7 +4218,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx, ac->nodemask);
-	if (!ac->preferred_zoneref->zone)
+	if (!zonelist_zone(ac->preferred_zoneref))
 		goto nopage;
 
 	/*
@@ -4230,7 +4230,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx,
 					&cpuset_current_mems_allowed);
-		if (!z->zone)
+		if (!zonelist_zone(z))
 			goto nopage;
 	}
 
@@ -4587,8 +4587,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
-		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
-		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+		    zone_to_nid(zone) != zone_to_nid(zonelist_zone(ac.preferred_zoneref))) {
 			goto failed;
 		}
 
@@ -4647,7 +4647,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	pcp_trylock_finish(UP_flags);
 
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
-	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
 
 out:
 	return nr_populated;
@@ -4705,7 +4705,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5310,7 +5310,7 @@ int local_memory_node(int node)
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				   gfp_zone(GFP_KERNEL),
 				   NULL);
-	return zone_to_nid(z->zone);
+	return zone_to_nid(zonelist_zone(z));
 }
 #endif
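As a sanity check on the "no functional change" claim, here is a small standalone program, with toy stand-in types rather than kernel code, showing that zonelist_zone(z) reads exactly the same field as z->zone, including on the NULL sentinel that the `if (!zonelist_zone(...)) goto nopage;` checks in the patch rely on:

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins; the layout mirrors the kernel's struct zoneref. */
struct zone { int node; };

struct zoneref {
	struct zone *zone;
	int zone_idx;
};

/* Same body as the kernel accessor: a plain field read. */
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

int main(void)
{
	struct zone zones[2] = { { .node = 0 }, { .node = 1 } };
	/* A NULL-terminated zoneref list, like the kernel's _zonerefs[]. */
	struct zoneref zrefs[3] = {
		{ .zone = &zones[1], .zone_idx = 1 },
		{ .zone = &zones[0], .zone_idx = 0 },
		{ .zone = NULL,      .zone_idx = 0 },
	};

	/* Old spelling (z->zone) and new spelling (zonelist_zone(z)) must
	 * agree at every entry, including the NULL terminator. */
	for (struct zoneref *z = zrefs; ; z++) {
		assert(z->zone == zonelist_zone(z));
		if (!zonelist_zone(z))
			break;
		printf("zone_idx=%d nid=%d\n", z->zone_idx, z->zone->node);
	}
	return 0;
}
```

Since the accessor is a static inline returning the same field, the compiler emits identical code for both spellings; the patch is purely a readability and consistency cleanup.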