@@ -185,9 +185,6 @@ static int __apply_alternatives_multi_stop(void *unused)
 {
     int ret;
     struct alt_region region;
-    mfn_t xen_mfn = virt_to_mfn(_start);
-    paddr_t xen_size = _end - _start;
-    unsigned int xen_order = get_order_from_bytes(xen_size);
     void *xenmap;
 
     BUG_ON(patched);
@@ -196,8 +193,7 @@ static int __apply_alternatives_multi_stop(void *unused)
      * The text and inittext section are read-only. So re-map Xen to
      * be able to patch the code.
      */
-    xenmap = __vmap(&xen_mfn, 1U << xen_order, 1, 1, PAGE_HYPERVISOR,
-                    VMAP_DEFAULT);
+    xenmap = xen_map_text_rw();
+
     /* Re-mapping Xen is not expected to fail during boot. */
     BUG_ON(!xenmap);
@@ -208,7 +204,7 @@ static int __apply_alternatives_multi_stop(void *unused)
     /* The patching is not expected to fail during boot. */
     BUG_ON(ret != 0);
 
-    vunmap(xenmap);
+    xen_unmap_text_rw(xenmap);
 
     /* Barriers provided by the cache flushing */
     write_atomic(&patched, 1);
@@ -195,6 +195,10 @@ extern void mmu_init_secondary_cpu(void);
 extern void setup_xenheap_mappings(unsigned long base_mfn, unsigned long nr_mfns);
 /* Map a frame table to cover physical addresses ps through pe */
 extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
+/* Create a temporary read-write mapping of the Xen text/inittext sections */
+extern void *xen_map_text_rw(void);
+/* Remove a mapping previously created by xen_map_text_rw() */
+extern void xen_unmap_text_rw(void *va);
 /* Map a 4k page in a fixmap entry */
 extern void set_fixmap(unsigned map, mfn_t mfn, unsigned attributes);
 /* Remove a mapping from a fixmap entry */
@@ -637,6 +637,36 @@ static void clear_table(void *table)
 }
 
 #ifdef CONFIG_COLORING
+/* Map the Xen text/inittext sections read-write at a temporary VA, one page
+ * at a time, following the (physically non-contiguous) coloring layout. */
+void *__init xen_map_text_rw(void)
+{
+    paddr_t xen_paddr = __pa(_start);
+    unsigned int nr_pages = 1U << get_order_from_bytes(_end - _start);
+    void *va = vm_alloc(nr_pages, 1, VMAP_DEFAULT);
+    unsigned long cur = (unsigned long)va;
+    mfn_t mfn_col;
+    unsigned int i;
+
+    /* Without this check, a failed vm_alloc() would map pages at VA 0. */
+    if ( !va )
+        return NULL;
+
+    for ( i = 0; i < nr_pages; i++, cur += PAGE_SIZE )
+    {
+        xen_paddr = next_xen_colored(xen_paddr);
+        mfn_col = maddr_to_mfn(xen_paddr);
+        if ( map_pages_to_xen(cur, mfn_col, 1, PAGE_HYPERVISOR) )
+            return NULL;
+        xen_paddr += PAGE_SIZE;
+    }
+    return va;
+}
+
+void __init xen_unmap_text_rw(void *va)
+{
+    vunmap(va);
+}
+
 /*
  * Translate a Xen (.text) virtual address to the colored physical one
  * depending on the hypervisor configuration.
@@ -796,6 +826,21 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     xen_pt_enforce_wnx();
 }
 #else
+/* Map the (physically contiguous) Xen text read-write at a temporary VA. */
+void *__init xen_map_text_rw(void)
+{
+    unsigned int xen_order = get_order_from_bytes(_end - _start);
+    mfn_t xen_mfn = virt_to_mfn(_start);
+
+    return __vmap(&xen_mfn, 1U << xen_order, 1, 1, PAGE_HYPERVISOR,
+                  VMAP_DEFAULT);
+}
+
+void __init xen_unmap_text_rw(void *va)
+{
+    vunmap(va);
+}
+
 /* Boot-time pagetable setup.
  * Changes here may need matching changes in head.S */
 void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
@@ -45,8 +45,8 @@ void __init vm_init_type(enum vmap_region type, void *start, void *end)
     populate_pt_range(va, vm_low[type] - nr);
 }
 
-static void *vm_alloc(unsigned int nr, unsigned int align,
-                      enum vmap_region t)
+void *vm_alloc(unsigned int nr, unsigned int align,
+               enum vmap_region t)
 {
     unsigned int start, bit;
 
@@ -12,6 +12,8 @@ enum vmap_region {
 
 void vm_init_type(enum vmap_region type, void *start, void *end);
 
+void *vm_alloc(unsigned int nr, unsigned int align,
+               enum vmap_region t);
 void *__vmap(const mfn_t *mfn, unsigned int granularity, unsigned int nr,
              unsigned int align, unsigned int flags, enum vmap_region);
 void *vmap(const mfn_t *mfn, unsigned int nr);