@@ -764,7 +764,7 @@ void __init paging_init(void)
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
- sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_memory_present_with_active_regions(NUMA_NO_NODE);
sparse_init();
#ifdef CONFIG_VIRTUAL_MEM_MAP
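
Every hunk in this series makes the same substitution: the "match any node" sentinel passed to the memblock iterators, the early allocators, and sparse_memory_present_with_active_regions() changes from MAX_NUMNODES to NUMA_NO_NODE. For reference, the sentinel itself, as defined in mainline include/linux/numa.h (shown for context, not part of this patch):

	#define NUMA_NO_NODE	(-1)
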
@@ -142,7 +142,7 @@ static void __init get_node_active_region(unsigned long pfn,
unsigned long start_pfn, end_pfn;
int i, nid;
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ for_each_mem_pfn_range(i, NUMA_NO_NODE, &start_pfn, &end_pfn, &nid) {
if (pfn >= start_pfn && pfn < end_pfn) {
node_ar->nid = nid;
node_ar->start_pfn = start_pfn;
@@ -126,7 +126,7 @@ void __init paging_init(void)
atomic_set(&init_mm.context.attach_count, 1);
- sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_memory_present_with_active_regions(NUMA_NO_NODE);
sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
@@ -1346,7 +1346,7 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
/* XXX cpu notifier XXX */
- sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_memory_present_with_active_regions(NUMA_NO_NODE);
sparse_init();
return end_pfn;
@@ -91,7 +91,7 @@ void __init setup_bios_corruption_check(void)
corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
- for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+ for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) {
start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
PAGE_SIZE, corruption_check_size);
end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
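
The clamp_t() calls intersect each free range with the fixed scan window before testing it; the memtest hunk further down uses the same intersect-then-test idiom against [start, end). A minimal sketch of the pattern, in which only clamp_t() is real and the window bounds and consumer are hypothetical:

	phys_addr_t lo = PAGE_SIZE, hi = corruption_check_size;

	start = clamp_t(phys_addr_t, start, lo, hi);
	end = clamp_t(phys_addr_t, end, lo, hi);
	if (start < end)
		scan_window(start, end);	/* hypothetical consumer */
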
@@ -1114,13 +1114,13 @@ void __init memblock_find_dma_reserve(void)
* need to use memblock to get free size in [0, MAX_DMA_PFN]
* at first, and assume boot_mem will not take below MAX_DMA_PFN
*/
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+ for_each_mem_pfn_range(i, NUMA_NO_NODE, &start_pfn, &end_pfn, NULL) {
start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN);
end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN);
nr_pages += end_pfn - start_pfn;
}
- for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
+ for_each_free_mem_range(u, NUMA_NO_NODE, &start, &end, NULL) {
start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
if (start_pfn < end_pfn)
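
The first loop counts every page frame below MAX_DMA_PFN; the second counts the free ones. Their difference is the early-reserved page estimate handed to the page allocator. The continuation, per mainline memblock_find_dma_reserve() of this era (not part of the hunk), is in effect:

	set_dma_reserve(nr_pages - nr_free_pages);
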
@@ -379,7 +379,7 @@ static unsigned long __init init_range_memory_mapping(
unsigned long mapped_ram_size = 0;
int i;
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+ for_each_mem_pfn_range(i, NUMA_NO_NODE, &start_pfn, &end_pfn, NULL) {
u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
if (start >= end)
@@ -706,7 +706,7 @@ void __init paging_init(void)
* NOTE: at this point the bootmem allocator is fully available.
*/
olpc_dt_build_devicetree();
- sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_memory_present_with_active_regions(NUMA_NO_NODE);
sparse_init();
zone_sizes_init();
}
@@ -649,7 +649,7 @@ void __init initmem_init(void)
void __init paging_init(void)
{
- sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_memory_present_with_active_regions(NUMA_NO_NODE);
sparse_init();
/*
@@ -74,7 +74,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end)
u64 i;
phys_addr_t this_start, this_end;
- for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+ for_each_free_mem_range(i, NUMA_NO_NODE, &this_start, &this_end, NULL) {
this_start = clamp_t(phys_addr_t, this_start, start, end);
this_end = clamp_t(phys_addr_t, this_end, start, end);
if (this_start < this_end) {
@@ -561,7 +561,7 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
- WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
+ WARN_ON(memblock_set_node(0, ULLONG_MAX, NUMA_NO_NODE));
numa_reset_distance();
ret = init_func();
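
Stamping [0, ULLONG_MAX) with NUMA_NO_NODE resets every memblock.memory region to "no node" before init_func() assigns real node ids. Given the iterator predicate changed later in this patch, the state right after the reset looks like this (a hypothetical illustration, not code from the patch):

	unsigned long s, e;
	int i;

	for_each_mem_pfn_range(i, 0, &s, &e, NULL)
		pr_info("node 0: %lx-%lx\n", s, e);	/* matches nothing yet */
	for_each_mem_pfn_range(i, NUMA_NO_NODE, &s, &e, NULL)
		pr_info("any: %lx-%lx\n", s, e);	/* visits every region */
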
@@ -148,18 +148,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#define BOOTMEM_ALLOC_ACCESSIBLE 0
#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
-/*
- * FIXME: use NUMA_NO_NODE instead of MAX_NUMNODES when bootmem/nobootmem code
- * will be removed.
- * It can't be done now, because when MEMBLOCK or NO_BOOTMEM are not enabled
- * all calls of the new API will be redirected to bottmem/nobootmem where
- * MAX_NUMNODES is widely used.
- * Also, memblock core APIs __next_free_mem_range_rev() and
- * __next_free_mem_range() would need to be updated, and as result we will
- * need to re-check/update all direct calls of memblock_alloc_xxx()
- * APIs (including nobootmem).
- */
-
/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
phys_addr_t align, phys_addr_t from,
@@ -171,20 +159,20 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
#define memblock_virt_alloc(x) \
memblock_virt_alloc_try_nid(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT, \
- BOOTMEM_ALLOC_ACCESSIBLE, MAX_NUMNODES)
+ BOOTMEM_ALLOC_ACCESSIBLE, NUMA_NO_NODE)
#define memblock_virt_alloc_align(x, align) \
memblock_virt_alloc_try_nid(x, align, BOOTMEM_LOW_LIMIT, \
- BOOTMEM_ALLOC_ACCESSIBLE, MAX_NUMNODES)
+ BOOTMEM_ALLOC_ACCESSIBLE, NUMA_NO_NODE)
#define memblock_virt_alloc_nopanic(x) \
memblock_virt_alloc_try_nid_nopanic(x, SMP_CACHE_BYTES, \
BOOTMEM_LOW_LIMIT, \
BOOTMEM_ALLOC_ACCESSIBLE, \
- MAX_NUMNODES)
+ NUMA_NO_NODE)
#define memblock_virt_alloc_align_nopanic(x, align) \
memblock_virt_alloc_try_nid_nopanic(x, align, \
BOOTMEM_LOW_LIMIT, \
BOOTMEM_ALLOC_ACCESSIBLE, \
- MAX_NUMNODES)
+ NUMA_NO_NODE)
#define memblock_virt_alloc_node(x, nid) \
memblock_virt_alloc_try_nid(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT, \
BOOTMEM_ALLOC_ACCESSIBLE, nid)
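
A usage sketch for the rewritten wrappers, with a hypothetical caller: the node-agnostic forms now forward NUMA_NO_NODE to memblock_virt_alloc_try_nid(), while the _node variant still passes an explicit nid through unchanged.

	struct some_table *t;	/* hypothetical structure */

	t = memblock_virt_alloc(sizeof(*t));		/* any node; panics on failure */
	t = memblock_virt_alloc_node(sizeof(*t), 1);	/* prefer node 1 */
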
@@ -69,7 +69,7 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
/**
* for_each_mem_pfn_range - early memory pfn range iterator
* @i: an integer used as loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
* @p_start: ptr to ulong for start pfn of the range, can be %NULL
* @p_end: ptr to ulong for end pfn of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
@@ -87,7 +87,7 @@ void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
/**
* for_each_free_mem_range - iterate through free memblock areas
* @i: u64 used as loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
@@ -107,7 +107,7 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
/**
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
* @i: u64 used as loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
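
A minimal sketch of the iterator contract documented in the three blocks above, assuming a caller that only wants to log each free range:

	phys_addr_t start, end;
	int nid;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, &nid)
		pr_debug("free: %pa-%pa nid=%d\n", &start, &end, nid);
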
@@ -94,7 +94,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
* @size: size of free area to find
* @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
* Utility called from memblock_find_in_range_node(), find free area bottom-up.
*
@@ -126,7 +126,7 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
* @size: size of free area to find
* @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
* Utility called from memblock_find_in_range_node(), find free area top-down.
*
@@ -161,7 +161,7 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
* @size: size of free area to find
* @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
* Find @size free area aligned to @align in the specified range and node.
*
@@ -242,7 +242,7 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
phys_addr_t align)
{
return memblock_find_in_range_node(start, end, size, align,
- MAX_NUMNODES);
+ NUMA_NO_NODE);
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
@@ -258,7 +258,7 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
type->cnt = 1;
type->regions[0].base = 0;
type->regions[0].size = 0;
- memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
+ memblock_set_region_node(&type->regions[0], NUMA_NO_NODE);
}
}
@@ -558,7 +558,7 @@ int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
- return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
+ return memblock_add_region(&memblock.memory, base, size, NUMA_NO_NODE);
}
/**
@@ -674,13 +674,13 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
(unsigned long long)base + size - 1,
(void *)_RET_IP_);
- return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
+ return memblock_add_region(_rgn, base, size, NUMA_NO_NODE);
}
/**
* __next_free_mem_range - next function for for_each_free_mem_range()
* @idx: pointer to u64 loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
* @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @out_nid: ptr to int for nid of the range, can be %NULL
@@ -715,7 +715,7 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
phys_addr_t m_end = m->base + m->size;
/* only memory regions are associated with nodes, check it */
- if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+ if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
continue;
/* scan areas before each reservation for intersection */
@@ -756,7 +756,7 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
/**
* __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
* @idx: pointer to u64 loop variable
- * @nid: nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
* @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @out_nid: ptr to int for nid of the range, can be %NULL
@@ -783,7 +783,7 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
phys_addr_t m_end = m->base + m->size;
/* only memory regions are associated with nodes, check it */
- if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+ if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
continue;
/* scan areas before each reservation for intersection */
@@ -833,7 +833,7 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
continue;
- if (nid == MAX_NUMNODES || nid == r->nid)
+ if (nid == NUMA_NO_NODE || nid == r->nid)
break;
}
if (*idx >= type->cnt) {
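
All three iterators gate on the same wildcard test, which is why the sentinel has to flip everywhere in a single patch; the shared shape, as in the hunks above:

	if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
		continue;	/* skip regions belonging to other nodes */
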
@@ -906,7 +906,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
- return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
+ return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
}
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
@@ -945,7 +945,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* @max_addr: the upper bound of the memory region from where the allocation
* is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
* The @from limit is dropped if it can not be satisfied and the allocation
* will fall back to memory below @from.
@@ -971,10 +971,7 @@ static void * __init _memblock_virt_alloc_try_nid_nopanic(
void *ptr;
if (WARN_ON_ONCE(slab_is_available())) {
- if (nid == MAX_NUMNODES)
- return kzalloc(size, GFP_NOWAIT);
- else
- return kzalloc_node(size, GFP_NOWAIT, nid);
+ return kzalloc_node(size, GFP_NOWAIT, nid);
}
if (!align)
@@ -988,9 +985,9 @@ again:
if (alloc)
goto done;
- if (nid != MAX_NUMNODES) {
+ if (nid != NUMA_NO_NODE) {
alloc = memblock_find_in_range_node(from, max_addr, size,
- align, MAX_NUMNODES);
+ align, NUMA_NO_NODE);
if (alloc)
goto done;
}
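
The slab fallback collapses to one call because kzalloc() is simply the nid-less form of kzalloc_node(): NUMA_NO_NODE means "no placement preference", so both arms of the old branch performed the same allocation. The retry logic above it now encodes a clean fallback order, summarized here (step 3 sits in the error path outside this hunk):

	/*
	 * 1. try [from, max_addr] on the requested nid
	 * 2. if nid was specific, retry the same range with NUMA_NO_NODE
	 * 3. drop the 'from' lower bound and try again
	 */
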
@@ -1028,7 +1025,7 @@ error:
* @max_addr: the upper bound of the memory region from where the allocation
* is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
* Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
* additional debug information (including caller info), if enabled.
@@ -1057,7 +1054,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
* @max_addr: the upper bound of the memory region from where the allocation
* is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
* Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
* which provides debug information (including caller info), if enabled,
@@ -1320,7 +1317,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name
base = rgn->base;
size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
- if (memblock_get_region_node(rgn) != MAX_NUMNODES)
+ if (memblock_get_region_node(rgn) != NUMA_NO_NODE)
snprintf(nid_buf, sizeof(nid_buf), " on node %d",
memblock_get_region_node(rgn));
#endif
@@ -117,7 +117,7 @@ static unsigned long __init free_low_memory_core_early(void)
phys_addr_t start, end, size;
u64 i;
- for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+ for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
count += __free_memory_core(start, end);
/* free range that is used for reserved array if we allocate it */
@@ -161,7 +161,7 @@ unsigned long __init free_all_bootmem(void)
reset_all_zones_managed_pages();
/*
- * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
+ * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
* because in some case like Node0 doesn't have RAM installed
* low ram will be on Node1
*/
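
The comment guards the node-agnostic walk inside free_low_memory_core_early() (the earlier hunk): node 0 may own no RAM at all, so freeing through NODE_DATA(0) would miss low memory that lives on node 1. Per nobootmem.c of this era, the function then finishes with (shape approximate, not part of the hunk):

	pages = free_low_memory_core_early();	/* walks all nodes' free ranges */
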
@@ -215,7 +215,7 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size,
restart:
- ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
+ ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);
if (ptr)
return ptr;
@@ -299,7 +299,7 @@ again:
if (ptr)
return ptr;
- ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
+ ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
goal, limit);
if (ptr)
return ptr;
@@ -4347,7 +4347,7 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
/**
* free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
- * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
+ * @nid: The node to free memory on. If NUMA_NO_NODE, all nodes are freed.
* @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
*
* If an architecture guarantees that all ranges registered with
@@ -4373,7 +4373,7 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
/**
* sparse_memory_present_with_active_regions - Call memory_present for each active range
- * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
+ * @nid: The node to call memory_present for. If NUMA_NO_NODE, all nodes will be used.
*
* If an architecture guarantees that all ranges registered with
* add_active_ranges() contain no holes and may be freed, this
@@ -4390,7 +4390,7 @@ void __init sparse_memory_present_with_active_regions(int nid)
/**
* get_pfn_range_for_nid - Return the start and end page frames for a node
- * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
+ * @nid: The nid to return the range for. If NUMA_NO_NODE, the min and max PFN are returned.
* @start_pfn: Passed by reference. On return, it will have the node start_pfn.
* @end_pfn: Passed by reference. On return, it will have the node end_pfn.
*
@@ -4506,7 +4506,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
}
/*
- * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
+ * Return the number of holes in a range on a node. If nid is NUMA_NO_NODE,
* then all holes in the requested range will be accounted for.
*/
unsigned long __meminit __absent_pages_in_range(int nid,
@@ -4535,7 +4535,7 @@ unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn)
{
- return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
+ return __absent_pages_in_range(NUMA_NO_NODE, start_pfn, end_pfn);
}
/* Return the number of page frames in holes in a zone on a node */
@@ -4926,7 +4926,7 @@ unsigned long __init node_map_pfn_alignment(void)
int last_nid = -1;
int i, nid;
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
+ for_each_mem_pfn_range(i, NUMA_NO_NODE, &start, &end, &nid) {
if (!start || last_nid < 0 || last_nid == nid) {
last_nid = nid;
last_end = end;
@@ -4977,7 +4977,7 @@ static unsigned long __init find_min_pfn_for_node(int nid)
*/
unsigned long __init find_min_pfn_with_active_regions(void)
{
- return find_min_pfn_for_node(MAX_NUMNODES);
+ return find_min_pfn_for_node(NUMA_NO_NODE);
}
/*
@@ -4991,7 +4991,7 @@ static unsigned long __init early_calculate_totalpages(void)
unsigned long start_pfn, end_pfn;
int i, nid;
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ for_each_mem_pfn_range(i, NUMA_NO_NODE, &start_pfn, &end_pfn, &nid) {
unsigned long pages = end_pfn - start_pfn;
totalpages += pages;
@@ -5231,7 +5231,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
/* Print out the early node map */
printk("Early memory node ranges\n");
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+ for_each_mem_pfn_range(i, NUMA_NO_NODE, &start_pfn, &end_pfn, &nid)
printk(" node %3d: [mem %#010lx-%#010lx]\n", nid,
start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
@@ -1853,7 +1853,7 @@ static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
return memblock_virt_alloc_try_nid_nopanic(size, align,
__pa(MAX_DMA_ADDRESS),
BOOTMEM_ALLOC_ACCESSIBLE,
- MAX_NUMNODES);
+ NUMA_NO_NODE);
}
static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
@@ -1905,7 +1905,7 @@ void __init setup_per_cpu_areas(void)
PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
BOOTMEM_ALLOC_ACCESSIBLE,
- MAX_NUMNODES);
+ NUMA_NO_NODE);
if (!ai || !fc)
panic("Failed to allocate memory for percpu areas.");
/* kmemleak tracks the percpu allocations separately */