@@ -5380,6 +5380,31 @@ static void build_thisnode_zonelists(pg_data_t *pgdat)
zonerefs->zone_idx = 0;
}

+int build_node_order(int *node_order_array, int sz,
+ int local_node, nodemask_t *used_mask)
+{
+ int node, nr_nodes = 0;
+ int prev_node = local_node;
+ int load = nr_online_nodes;
+
+ while (nr_nodes < sz &&
+ (node = find_next_best_node(local_node, used_mask)) >= 0) {
+ /*
+ * We don't want to pressure a particular node.
+ * So adding penalty to the first node in same
+ * distance group to make it round-robin.
+ */
+ if (node_distance(local_node, node) !=
+ node_distance(local_node, prev_node))
+ node_load[node] = load;
+
+ node_order_array[nr_nodes++] = node;
+ prev_node = node;
+ load--;
+ }
+ return nr_nodes;
+}
+
/*
* Build zonelists ordered by zone and nodes within zones.
* This results in conserving DMA zone[s] until all Normal memory is
@@ -5390,32 +5415,16 @@ static void build_thisnode_zonelists(pg_data_t *pgdat)
static void build_zonelists(pg_data_t *pgdat)
{
static int node_order[MAX_NUMNODES];
- int node, load, nr_nodes = 0;
+ int local_node, nr_nodes = 0;
nodemask_t used_mask;
- int local_node, prev_node;

/* NUMA-aware ordering of nodes */
local_node = pgdat->node_id;
- load = nr_online_nodes;
- prev_node = local_node;
nodes_clear(used_mask);

memset(node_order, 0, sizeof(node_order));
- while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
- /*
- * We don't want to pressure a particular node.
- * So adding penalty to the first node in same
- * distance group to make it round-robin.
- */
- if (node_distance(local_node, node) !=
- node_distance(local_node, prev_node))
- node_load[node] = load;
-
- node_order[nr_nodes++] = node;
- prev_node = node;
- load--;
- }
-
+ nr_nodes = build_node_order(node_order, MAX_NUMNODES,
+ local_node, &used_mask);
build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
build_thisnode_zonelists(pgdat);
}
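To make the round-robin penalty described in the moved comment concrete, here is a worked example written as a C comment. The topology is hypothetical (four nodes, all remote distances equal, every node with CPUs), and the orderings follow from find_next_best_node()'s scoring, in which node_load[] is the least-significant tie-breaker:

/*
 * Hypothetical box: nodes 0-3, node_distance(i, j) == 21 for i != j.
 * Running build_node_order() for each node in turn yields:
 *
 *   local 0: [0, 1, 2, 3]   (node 1 heads a new distance group,
 *                            so it is charged node_load[1] = load)
 *   local 1: [1, 2, 3, 0]   (node 2 charged)
 *   local 2: [2, 3, 0, 1]   (node 3 charged)
 *   local 3: [3, 0, 1, 2]
 *
 * Each node ends up with a different first remote fallback
 * (1, 2, 3 and 0 respectively): the stored node_load[] penalty and
 * the "prefer the next node" bias in find_next_best_node() together
 * keep equidistant peers from all falling back onto the same node.
 */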
In a coming patch, the memblock allocator will also make use of the node fallback list. Hence extract the related code into a reusable helper.

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
CC: Thomas Gleixner <tglx@linutronix.de>
CC: Ingo Molnar <mingo@redhat.com>
CC: Borislav Petkov <bp@alien8.de>
CC: "H. Peter Anvin" <hpa@zytor.com>
CC: Dave Hansen <dave.hansen@linux.intel.com>
CC: Vlastimil Babka <vbabka@suse.cz>
CC: Mike Rapoport <rppt@linux.vnet.ibm.com>
CC: Andrew Morton <akpm@linux-foundation.org>
CC: Mel Gorman <mgorman@suse.de>
CC: Joonsoo Kim <iamjoonsoo.kim@lge.com>
CC: Andy Lutomirski <luto@kernel.org>
CC: Andi Kleen <ak@linux.intel.com>
CC: Petr Tesarik <ptesarik@suse.cz>
CC: Michal Hocko <mhocko@suse.com>
CC: Stephen Rothwell <sfr@canb.auug.org.au>
CC: Jonathan Corbet <corbet@lwn.net>
CC: Nicholas Piggin <npiggin@gmail.com>
CC: Daniel Vacek <neelx@redhat.com>
CC: linux-kernel@vger.kernel.org
---
 mm/page_alloc.c | 47 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 28 insertions(+), 19 deletions(-)
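As for the reuse mentioned in the changelog, a minimal sketch of what a memblock-side caller could look like follows. Everything here is an assumption for illustration: alloc_near_nid() is an invented name, build_node_order() is assumed to be declared in a shared header, and memblock_phys_alloc_try_nid() merely stands in for whichever allocation primitive the follow-up patch actually uses:

/* Hypothetical caller, not part of this series: allocate near @nid,
 * falling back to the remaining nodes ordered by distance.
 */
static int node_fallback[MAX_NUMNODES] __initdata;

static phys_addr_t __init alloc_near_nid(int nid, phys_addr_t size,
        phys_addr_t align)
{
        nodemask_t used_mask;
        phys_addr_t addr;
        int i, nr_nodes;

        nodes_clear(used_mask);
        /* Nearest-first fallback list, local node in slot 0 */
        nr_nodes = build_node_order(node_fallback, MAX_NUMNODES,
                        nid, &used_mask);
        /* Walk the fallback list until an allocation succeeds */
        for (i = 0; i < nr_nodes; i++) {
                addr = memblock_phys_alloc_try_nid(size, align,
                                node_fallback[i]);
                if (addr)
                        return addr;
        }
        return 0;
}

One caveat worth noting in such a caller: build_node_order() writes the shared node_load[] penalty array as a side effect, so users outside the zonelist builder are touching the same state it consumes; whether that matters depends on when they run relative to build_all_zonelists().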