@@ -861,8 +861,7 @@ void __init start_xen(unsigned long boot_phys_offset,
if ( construct_dom0(dom0) != 0)
panic("Could not set up DOM0 guest OS");
- /* Scrub RAM that is still free and so may go to an unprivileged domain. */
- scrub_heap_pages();
+ heap_init_late();
init_constructors();
@@ -1662,8 +1662,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
cr4_pv32_mask |= X86_CR4_SMAP;
}
- /* Scrub RAM that is still free and so may go to an unprivileged domain. */
- scrub_heap_pages();
+ heap_init_late();
init_trace_bufs();
@@ -1839,7 +1839,7 @@ static int __init find_non_smt(unsigned int node, cpumask_t *dest)
* Scrub all unallocated pages in all heap zones. This function uses all
 * online CPUs to scrub the memory in parallel.
*/
-void __init scrub_heap_pages(void)
+static void __init scrub_heap_pages(void)
{
cpumask_t node_cpus, all_worker_cpus;
unsigned int i, j;
@@ -1849,9 +1849,6 @@ void __init scrub_heap_pages(void)
int last_distance, best_node;
int cpus;
- if ( !opt_bootscrub )
- return;
-
cpumask_clear(&all_worker_cpus);
/* Scrub block size. */
chunk_size = opt_bootscrub_chunk >> PAGE_SHIFT;
@@ -1970,12 +1967,19 @@ void __init scrub_heap_pages(void)
#ifdef CONFIG_SCRUB_DEBUG
boot_scrub_done = true;
#endif
+}
- /* Now that the heap is initialized, run checks and set bounds
- * for the low mem virq algorithm. */
+void __init heap_init_late(void)
+{
+ /*
+ * Now that the heap is initialized, set bounds
+ * for the low mem virq algorithm.
+ */
setup_low_mem_virq();
-}
+ if ( opt_bootscrub )
+ scrub_heap_pages();
+}
/*************************
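
For reference, a standalone sketch of the boot-time flow these hunks produce. Only the names heap_init_late(), setup_low_mem_virq(), scrub_heap_pages() and opt_bootscrub are taken from the patch; the stub bodies, default value and the main() wrapper are illustrative so the sketch compiles on its own and only demonstrates call order, not the real Xen internals.

/*
 * Illustrative sketch: the arch boot code now calls heap_init_late(),
 * which sets the low mem virq bounds and only then scrubs free heap
 * pages if "bootscrub" was requested on the command line.
 */
#include <stdbool.h>
#include <stdio.h>

static bool opt_bootscrub = true;   /* stand-in for the "bootscrub=" option */

static void setup_low_mem_virq(void)
{
    puts("low mem virq bounds set");
}

/* Now static: no longer called directly by the arch boot code. */
static void scrub_heap_pages(void)
{
    puts("scrubbing free heap pages");
}

/* The single late heap-init hook exposed to start_xen()/__start_xen(). */
static void heap_init_late(void)
{
    setup_low_mem_virq();

    if ( opt_bootscrub )
        scrub_heap_pages();
}

int main(void)
{
    heap_init_late();   /* replaces the old direct scrub_heap_pages() call */
    return 0;
}
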
@@ -199,7 +199,7 @@ int offline_page(unsigned long mfn, int broken, uint32_t *status);
int query_page_offline(unsigned long mfn, uint32_t *status);
unsigned long total_free_pages(void);
-void scrub_heap_pages(void);
+void heap_init_late(void);
int assign_pages(
struct domain *d,