@@ -276,7 +276,6 @@ srm_paging_stop (void)
void __init
mem_init(void)
{
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
memblock_free_all();
}
@@ -150,8 +150,6 @@ void __init setup_arch_memory(void)
*/
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
- high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
-
arch_pfn_offset = min(min_low_pfn, min_high_pfn);
kmap_init();
#endif /* CONFIG_HIGHMEM */
@@ -314,8 +314,6 @@ void __init arm64_memblock_init(void)
}
early_init_fdt_scan_reserved_mem();
-
- high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
void __init bootmem_init(void)
@@ -47,7 +47,6 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
@@ -100,12 +100,6 @@ static void __init paging_init(void)
* initial kernel segment table's physical address.
*/
init_mm.context.ptbase = __pa(init_mm.pgd);
-
- /*
- * Start of high memory area. Will probably need something more
- * fancy if we... get more fancy.
- */
- high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}
#ifndef DMA_RESERVE
@@ -389,7 +389,6 @@ void __init paging_init(void)
void __init mem_init(void)
{
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
}
@@ -78,8 +78,6 @@ void __init paging_init(void)
void __init mem_init(void)
{
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
memblock_free_all();
}
#endif /* !CONFIG_NUMA */
@@ -120,8 +120,6 @@ void __init setup_memory(void)
void __init mem_init(void)
{
- high_memory = (void *)__va(memory_start + lowmem_size - 1);
-
/* this will put all memory onto the freelists */
memblock_free_all();
#ifdef CONFIG_HIGHMEM
@@ -417,7 +417,6 @@ void __init paging_init(void)
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
}
#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
free_area_init(max_zone_pfns);
}
@@ -469,7 +468,6 @@ void __init mem_init(void)
#else /* CONFIG_NUMA */
void __init mem_init(void)
{
- high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
setup_zero_pages(); /* This comes from node 0 */
memblock_free_all();
}
@@ -62,12 +62,6 @@ void __init paging_init(void)
void __init mem_init(void)
{
- unsigned long end_mem = memory_end; /* this must not include
- kernel stack at top */
-
- end_mem &= PAGE_MASK;
- high_memory = __va(end_mem);
-
/* this will put all memory onto the freelists */
memblock_free_all();
}
@@ -193,8 +193,6 @@ void __init mem_init(void)
{
BUG_ON(!mem_map);
- high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
-
/* clear the zero-page */
memset((void *)empty_zero_page, 0, PAGE_SIZE);
@@ -562,7 +562,6 @@ void __init mem_init(void)
BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif
- high_memory = __va((max_pfn << PAGE_SHIFT));
memblock_free_all();
#ifdef CONFIG_PA11
@@ -295,7 +295,6 @@ static void __init setup_bootmem(void)
phys_ram_end = memblock_end_of_DRAM();
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
- high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
@@ -159,8 +159,6 @@ void __init mem_init(void)
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(0, mm_cpumask(&init_mm));
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
pv_init();
kfence_split_mapping();
@@ -330,13 +330,6 @@ unsigned int mem_init_done = 0;
void __init mem_init(void)
{
- pg_data_t *pgdat;
-
- high_memory = NULL;
- for_each_online_pgdat(pgdat)
- high_memory = max_t(void *, high_memory,
- __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
-
memblock_free_all();
/* Set this up early, so we can take care of the zero page */
@@ -275,7 +275,6 @@ void __init mem_init(void)
taint_real_pages();
- high_memory = __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
@@ -2505,8 +2505,6 @@ static void __init register_page_bootmem_info(void)
}
void __init mem_init(void)
{
- high_memory = __va(last_valid_pfn << PAGE_SHIFT);
-
memblock_free_all();
/*
@@ -385,7 +385,6 @@ int __init linux_main(int argc, char **argv, char **envp)
high_physmem = uml_physmem + physmem_size;
end_iomem = high_physmem + iomem_size;
- high_memory = (void *) end_iomem;
start_vm = VMALLOC_START;
@@ -972,8 +972,6 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn = e820__end_of_low_ram_pfn();
else
max_low_pfn = max_pfn;
-
- high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif
/* Find and reserve MPTABLE area */
@@ -643,9 +643,6 @@ void __init initmem_init(void)
highstart_pfn = max_low_pfn;
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
- high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-#else
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
@@ -41,9 +41,6 @@ void __init initmem_init(void)
highstart_pfn = max_low_pfn;
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
- high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-#else
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
pages_to_mb(max_low_pfn));
@@ -164,8 +164,6 @@ void __init mem_init(void)
{
free_highpages();
- high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
-
memblock_free_all();
}
@@ -113,14 +113,6 @@ static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
return pte_marker_uffd_wp(vmf->orig_pte);
}
-/*
- * A number of key systems in x86 including ioremap() rely on the assumption
- * that high_memory defines the upper bound on direct map memory, then end
- * of ZONE_NORMAL.
- */
-void *high_memory;
-EXPORT_SYMBOL(high_memory);
-
/*
* Randomize the address space (stacks, mmaps, brk, etc.).
*
@@ -44,6 +44,13 @@ struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif
+/*
+ * high_memory defines the upper bound on direct map memory, i.e. the end
+ * of ZONE_NORMAL.
+ */
+void *high_memory;
+EXPORT_SYMBOL(high_memory);
+
#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;
@@ -1756,6 +1763,27 @@ static bool arch_has_descending_max_zone_pfns(void)
return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
}
+static void __init set_high_memory(void)
+{
+ phys_addr_t highmem = memblock_end_of_DRAM();
+
+ /*
+ * Some architectures (e.g. ARM) set high_memory very early and
+ * use it in arch setup code.
+ * If an architecture already set high_memory don't overwrite it
+	 * If an architecture already set high_memory, don't overwrite it.
+ if (high_memory)
+ return;
+
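+	/*
+	 * With HIGHMEM not all of DRAM is covered by the direct map: do
+	 * not let high_memory extend past the lowest PFN that may end up
+	 * in ZONE_HIGHMEM.
+	 */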
+#ifdef CONFIG_HIGHMEM
+ if (arch_has_descending_max_zone_pfns() ||
+ highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]))
+ highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]);
+#endif
+
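+	/*
+	 * Use the last byte below the bound and step one past it, so that
+	 * phys_to_virt() is only applied to an address inside the direct map.
+	 */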
+ high_memory = phys_to_virt(highmem - 1) + 1;
+}
+
/**
* free_area_init - Initialise all pg_data_t and zone data
* @max_zone_pfn: an array of max PFNs for each zone
@@ -1875,6 +1903,8 @@ void __init free_area_init(unsigned long *max_zone_pfn)
/* disable hash distribution for systems with a single node */
fixup_hashdist();
+
+ set_high_memory();
}
/**
@@ -42,8 +42,6 @@
#include <asm/mmu_context.h>
#include "internal.h"
-void *high_memory;
-EXPORT_SYMBOL(high_memory);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;