@@ -81,11 +81,20 @@ GLOBAL(l2_directmap)
.size l2_directmap, . - l2_directmap
/*
- * L2 mapping the Xen text/data/bss region, constructed dynamically. Uses 1x
- * 4k page.
+ * L2 mapping the Xen text/data/bss region, constructed dynamically.
+ * Executable fixmap is hooked up statically.
+ * Uses 1x 4k page.
*/
GLOBAL(l2_xenmap)
- .fill L2_PAGETABLE_ENTRIES, 8, 0
+ idx = 0
+ .rept L2_PAGETABLE_ENTRIES
+ .if idx == l2_table_offset(FIXADDR_X_TOP - 1)
+ .quad sym_offs(l1_fixmap_x) + __PAGE_HYPERVISOR
+ .else
+ .quad 0
+ .endif
+ idx = idx + 1
+ .endr
.size l2_xenmap, . - l2_xenmap
/* L2 mapping the fixmap. Uses 1x 4k page. */
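For reference, the slot hooked up statically above is the one covering
FIXADDR_X_TOP - 1. A minimal sketch of the offset computation, assuming the
usual 4-level paging constants from Xen's page.h:

    /* Sketch: l2_table_offset() under 4-level paging. */
    #define L2_PAGETABLE_SHIFT    21   /* each L2 entry maps 2MB */
    #define L2_PAGETABLE_ENTRIES  512
    #define l2_table_offset(a) \
        (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))

Since FIXADDR_X_TOP - 1 == XEN_VIRT_END - PAGE_SIZE - 1 falls in the last 2MB
chunk of the GB-aligned Xen slot, the l1_fixmap_x entry lands in slot 511.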
@@ -12,6 +12,7 @@
#include <xen/livepatch.h>
#include <xen/sched.h>
+#include <asm/fixmap.h>
#include <asm/nmi.h>
#include <asm/livepatch.h>
@@ -311,7 +312,7 @@ void __init arch_livepatch_init(void)
void *start, *end;
start = (void *)xen_virt_end;
- end = (void *)(XEN_VIRT_END - NR_CPUS * PAGE_SIZE);
+ end = (void *)(XEN_VIRT_END - FIXADDR_X_SIZE - NR_CPUS * PAGE_SIZE);
BUG_ON(end <= start);
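Putting the pieces of this series together, the top of the Xen virtual slot
is now carved up as follows (a sketch based on the arithmetic above and in
the smpboot.c hunk below, not to scale):

    xen_virt_end                                              XEN_VIRT_END
    |<--- livepatch region --->|<- NR_CPUS stub pages ->|<- FIXADDR_X_SIZE ->|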
@@ -157,6 +157,8 @@
/* Mapping of the fixmap space needed early. */
l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
l1_fixmap[L1_PAGETABLE_ENTRIES];
+l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
+ l1_fixmap_x[L1_PAGETABLE_ENTRIES];
paddr_t __read_mostly mem_hotplug;
@@ -372,6 +374,10 @@ void __init arch_init_memory(void)
}
}
#endif
+
+    /* Generate a symbol to be used in the linker script. */
+ asm ( ".equ FIXADDR_X_SIZE, %P0; .global FIXADDR_X_SIZE"
+ :: "i" (FIXADDR_X_SIZE) );
}
int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
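The asm() above turns a C-level constant into an absolute assembler symbol
that ld can see; the %P0 operand modifier prints the constant without the
immediate '$' prefix. A standalone sketch of the same trick, with
hypothetical names:

    /* C side: export a limit for consumption by the linker script. */
    #define MY_LIMIT 0x200000
    static void __init export_lds_syms(void)
    {
        asm ( ".equ MY_LIMIT_SYM, %P0; .global MY_LIMIT_SYM"
              :: "i" (MY_LIMIT) );
    }

    /* Linker script side: ASSERT(_end <= MY_LIMIT_SYM, "image too big") */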
@@ -5718,10 +5724,17 @@ int destroy_xen_mappings(unsigned long s, unsigned long e)
void __set_fixmap(
enum fixed_addresses idx, unsigned long mfn, unsigned long flags)
{
- BUG_ON(idx >= __end_of_fixed_addresses);
+ BUG_ON(idx >= __end_of_fixed_addresses || idx <= FIX_RESERVED);
map_pages_to_xen(__fix_to_virt(idx), _mfn(mfn), 1, flags);
}
+void __set_fixmap_x(
+ enum fixed_addresses_x idx, unsigned long mfn, unsigned long flags)
+{
+ BUG_ON(idx >= __end_of_fixed_addresses_x || idx <= FIX_X_RESERVED);
+ map_pages_to_xen(__fix_x_to_virt(idx), _mfn(mfn), 1, flags);
+}
+
void *__init arch_vmap_virt_end(void)
{
return fix_to_virt(__end_of_fixed_addresses);
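A sketch of how a consumer might drive the new interface (hypothetical
caller; FIX_X_HYPERV_HCALL comes from the fixmap.h hunk below, and
hcall_maddr is assumed to hold the machine address of a prepared hypercall
page):

    void *hv_hcall;

    set_fixmap_x(FIX_X_HYPERV_HCALL, hcall_maddr);  /* mapped RX */
    hv_hcall = fix_x_to_virt(FIX_X_HYPERV_HCALL);
    /* ... use hv_hcall ... */
    clear_fixmap_x(FIX_X_HYPERV_HCALL);             /* flags == 0 unmaps */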
@@ -644,7 +644,7 @@ unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn)
unmap_domain_page(memset(__map_domain_page(pg), 0xcc, PAGE_SIZE));
}
- stub_va = XEN_VIRT_END - (cpu + 1) * PAGE_SIZE;
+ stub_va = XEN_VIRT_END - FIXADDR_X_SIZE - (cpu + 1) * PAGE_SIZE;
if ( map_pages_to_xen(stub_va, page_to_mfn(pg), 1,
PAGE_HYPERVISOR_RX | MAP_SMALL_PAGES) )
{
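Concretely, the per-cpu stubs just move down by FIXADDR_X_SIZE:

    /* Before: stub_va(cpu) == XEN_VIRT_END - (cpu + 1) * PAGE_SIZE
     * After:  stub_va(cpu) == XEN_VIRT_END - FIXADDR_X_SIZE
     *                                      - (cpu + 1) * PAGE_SIZE */

so cpu 0's stub now sits directly below the executable fixmap instead of at
the very top of the slot.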
@@ -2,6 +2,8 @@
/* Modified for i386/x86-64 Xen by Keir Fraser */
#include <xen/cache.h>
+
+#include <asm/fixmap.h>
#include <asm/page.h>
#undef ENTRY
#undef ALIGN
@@ -352,6 +354,7 @@ SECTIONS
}
ASSERT(__2M_rwdata_end <= XEN_VIRT_END - XEN_VIRT_START + __XEN_VIRT_START -
+ FIXADDR_X_SIZE -
NR_CPUS * PAGE_SIZE,
"Xen image overlaps stubs area")
@@ -218,7 +218,7 @@ extern unsigned char boot_edid_info[128];
/* Slot 261: high read-only compat machine-to-phys conversion table (1GB). */
#define HIRO_COMPAT_MPT_VIRT_START RDWR_COMPAT_MPT_VIRT_END
#define HIRO_COMPAT_MPT_VIRT_END (HIRO_COMPAT_MPT_VIRT_START + GB(1))
-/* Slot 261: xen text, static data and bss (1GB). */
+/* Slot 261: xen text, static data, bss, per-cpu stubs and executable fixmap (1GB). */
#define XEN_VIRT_START (HIRO_COMPAT_MPT_VIRT_END)
#define XEN_VIRT_END (XEN_VIRT_START + GB(1))
@@ -15,6 +15,7 @@
#include <asm/page.h>
#define FIXADDR_TOP (VMAP_VIRT_END - PAGE_SIZE)
+#define FIXADDR_X_TOP (XEN_VIRT_END - PAGE_SIZE)
#ifndef __ASSEMBLY__
@@ -89,6 +90,29 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
return __virt_to_fix(vaddr);
}
+enum fixed_addresses_x {
+ /* Index 0 is reserved since fix_x_to_virt(0) == FIXADDR_X_TOP. */
+ FIX_X_RESERVED,
+#ifdef CONFIG_HYPERV_GUEST
+ FIX_X_HYPERV_HCALL,
+#endif
+ __end_of_fixed_addresses_x
+};
+
+#define FIXADDR_X_SIZE (__end_of_fixed_addresses_x << PAGE_SHIFT)
+#define FIXADDR_X_START (FIXADDR_X_TOP - FIXADDR_X_SIZE)
+
+extern void __set_fixmap_x(
+ enum fixed_addresses_x idx, unsigned long mfn, unsigned long flags);
+
+#define set_fixmap_x(idx, phys) \
+ __set_fixmap_x(idx, (phys)>>PAGE_SHIFT, PAGE_HYPERVISOR_RX | MAP_SMALL_PAGES)
+
+#define clear_fixmap_x(idx) __set_fixmap_x(idx, 0, 0)
+
+#define __fix_x_to_virt(x) (FIXADDR_X_TOP - ((x) << PAGE_SHIFT))
+#define fix_x_to_virt(x) ((void *)__fix_x_to_virt(x))
+
#endif /* __ASSEMBLY__ */
#endif
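For a feel for the numbers (PAGE_SHIFT == 12):

    /* CONFIG_HYPERV_GUEST=y : __end_of_fixed_addresses_x == 2 -> 8KB */
    /* otherwise             : __end_of_fixed_addresses_x == 1 -> 4KB */

so the range currently costs at most two pages carved off the top of the
Xen slot.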
This allows us to set aside some address space for executable mapping.
This fixed map range starts from XEN_VIRT_END so that it is within reach
of the .text section. Shift the per-cpu stub range and shrink the
livepatch range accordingly.

Signed-off-by: Wei Liu <liuwe@microsoft.com>
---
v6:
1. Move symbol generation snippet to arch_init_memory and use %P0

v5:
1. drop __virt_to_fix_x
2. also check FIX*_RESERVED in __set_fixmap*
3. generate global symbol to be used in linker script
4. address other misc comments
---
 xen/arch/x86/boot/x86_64.S   | 15 ++++++++++++---
 xen/arch/x86/livepatch.c     |  3 ++-
 xen/arch/x86/mm.c            | 15 ++++++++++++++-
 xen/arch/x86/smpboot.c       |  2 +-
 xen/arch/x86/xen.lds.S       |  3 +++
 xen/include/asm-x86/config.h |  2 +-
 xen/include/asm-x86/fixmap.h | 24 ++++++++++++++++++++++++
 7 files changed, 57 insertions(+), 7 deletions(-)