@@ -3040,21 +3040,25 @@ static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
int nr_zones, enum zone_type zone_type)
{
+ enum zone_type z_type = zone_type;
+ struct mem_region *region;
struct zone *zone;
BUG_ON(zone_type >= MAX_NR_ZONES);
zone_type++;
- do {
- zone_type--;
- zone = pgdat->node_zones + zone_type;
- if (populated_zone(zone)) {
- zoneref_set_zone(zone,
- &zonelist->_zonerefs[nr_zones++]);
- check_highest_zone(zone_type);
- }
-
- } while (zone_type);
+ for_each_mem_region_in_node(region, pgdat->node_id) {
+ do {
+ zone_type--;
+ zone = region->region_zones + zone_type;
+ if (populated_zone(zone)) {
+ zoneref_set_zone(zone,
+ &zonelist->_zonerefs[nr_zones++]);
+ check_highest_zone(zone_type);
+ }
+ } while (zone_type);
+ zone_type = z_type + 1;
+ }
return nr_zones;
}
@@ -3275,17 +3279,20 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
int zone_type; /* needs to be signed */
struct zone *z;
struct zonelist *zonelist;
+ struct mem_region *region;
zonelist = &pgdat->node_zonelists[0];
pos = 0;
for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
for (j = 0; j < nr_nodes; j++) {
node = node_order[j];
- z = &NODE_DATA(node)->node_zones[zone_type];
- if (populated_zone(z)) {
- zoneref_set_zone(z,
- &zonelist->_zonerefs[pos++]);
- check_highest_zone(zone_type);
+ for_each_mem_region_in_node(region, node) {
+ z = &region->region_zones[zone_type];
+ if (populated_zone(z)) {
+ zoneref_set_zone(z,
+ &zonelist->_zonerefs[pos++]);
+ check_highest_zone(zone_type);
+ }
}
}
}
@@ -3299,6 +3306,8 @@ static int default_zonelist_order(void)
unsigned long low_kmem_size,total_size;
struct zone *z;
int average_size;
+ struct mem_region *region;
+
/*
* ZONE_DMA and ZONE_DMA32 can be very small area in the system.
* If they are really small and used heavily, the system can fall
@@ -3310,12 +3319,15 @@ static int default_zonelist_order(void)
total_size = 0;
for_each_online_node(nid) {
for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
- z = &NODE_DATA(nid)->node_zones[zone_type];
- if (populated_zone(z)) {
- if (zone_type < ZONE_NORMAL)
- low_kmem_size += z->present_pages;
- total_size += z->present_pages;
- } else if (zone_type == ZONE_NORMAL) {
+ for_each_mem_region_in_node(region, nid) {
+ z = &region->region_zones[zone_type];
+ if (populated_zone(z)) {
+ if (zone_type < ZONE_NORMAL)
+ low_kmem_size +=
+ z->present_pages;
+
+ total_size += z->present_pages;
+ } else if (zone_type == ZONE_NORMAL) {
/*
* If any node has only lowmem, then node order
* is preferred to allow kernel allocations
@@ -3323,7 +3335,8 @@ static int default_zonelist_order(void)
* on other nodes when there is an abundance of
* lowmem available to allocate from.
*/
- return ZONELIST_ORDER_NODE;
+ return ZONELIST_ORDER_NODE;
+ }
}
}
}
@@ -3341,11 +3354,13 @@ static int default_zonelist_order(void)
low_kmem_size = 0;
total_size = 0;
for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
- z = &NODE_DATA(nid)->node_zones[zone_type];
- if (populated_zone(z)) {
- if (zone_type < ZONE_NORMAL)
- low_kmem_size += z->present_pages;
- total_size += z->present_pages;
+ for_each_mem_region_in_node(region, nid) {
+ z = &region->region_zones[zone_type];
+ if (populated_zone(z)) {
+ if (zone_type < ZONE_NORMAL)
+ low_kmem_size += z->present_pages;
+ total_size += z->present_pages;
+ }
}
}
if (low_kmem_size &&