@@ -299,7 +299,7 @@ static __init int kernel_decompress(struct bootmodule *mod)
return -ENOMEM;
}
mfn = _mfn(page_to_mfn(pages));
- output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR);
+ output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
rc = perform_gunzip(output, input, size);
clean_dcache_va_range(output, output_size);
@@ -807,7 +807,7 @@ void *ioremap_attr(paddr_t pa, size_t len, unsigned int attributes)
mfn_t mfn = _mfn(PFN_DOWN(pa));
unsigned int offs = pa & (PAGE_SIZE - 1);
unsigned int nr = PFN_UP(offs + len);
- void *ptr = __vmap(&mfn, nr, 1, 1, attributes);
+ void *ptr = __vmap(&mfn, nr, 1, 1, attributes, VMAP_DEFAULT);
if ( ptr == NULL )
return NULL;
@@ -6179,7 +6179,7 @@ void __iomem *ioremap(paddr_t pa, size_t len)
unsigned int offs = pa & (PAGE_SIZE - 1);
unsigned int nr = PFN_UP(offs + len);
- va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_NOCACHE) + offs;
+ va = __vmap(&mfn, nr, 1, 1, PAGE_HYPERVISOR_NOCACHE, VMAP_DEFAULT) + offs;
}
return (void __force __iomem *)va;
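The three hunks above are mechanical: each existing __vmap() caller gains a
trailing VMAP_DEFAULT argument, so behaviour is unchanged. The offs/nr
arithmetic they share is worth one concrete case, since the page count must
cover both the unaligned head and the tail of the mapping. A minimal
standalone sketch, assuming 4 KiB pages and a made-up MMIO address:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
    unsigned long pa = 0xfed00800UL, len = 0x1800UL; /* hypothetical MMIO */
    unsigned long offs = pa & (PAGE_SIZE - 1);       /* head offset: 0x800 */
    unsigned long nr = PFN_UP(offs + len);           /* pages needed: 2 */

    printf("offs=%#lx nr=%lu\n", offs, nr);
    return 0;
}

0x1800 bytes starting 0x800 into a page span two pages even though len alone
fits in one, which is why nr is derived from offs + len rather than len.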
@@ -10,40 +10,43 @@
#include <asm/page.h>
static DEFINE_SPINLOCK(vm_lock);
-static void *__read_mostly vm_base;
-#define vm_bitmap ((unsigned long *)vm_base)
+static void *__read_mostly vm_base[VMAP_REGION_NR];
+#define vm_bitmap(x) ((unsigned long *)vm_base[x])
/* highest allocated bit in the bitmap */
-static unsigned int __read_mostly vm_top;
+static unsigned int __read_mostly vm_top[VMAP_REGION_NR];
/* total number of bits in the bitmap */
-static unsigned int __read_mostly vm_end;
+static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
/* lowest known clear bit in the bitmap */
-static unsigned int vm_low;
+static unsigned int vm_low[VMAP_REGION_NR];
-void __init vm_init(void)
+void __init vm_init_type(enum vmap_region type, void *start, void *end)
{
unsigned int i, nr;
unsigned long va;
- vm_base = (void *)VMAP_VIRT_START;
- vm_end = PFN_DOWN(arch_vmap_virt_end() - vm_base);
- vm_low = PFN_UP((vm_end + 7) / 8);
- nr = PFN_UP((vm_low + 7) / 8);
- vm_top = nr * PAGE_SIZE * 8;
+ ASSERT(!vm_base[type]);
- for ( i = 0, va = (unsigned long)vm_bitmap; i < nr; ++i, va += PAGE_SIZE )
+ vm_base[type] = start;
+ vm_end[type] = PFN_DOWN(end - start);
+ vm_low[type] = PFN_UP((vm_end[type] + 7) / 8);
+ nr = PFN_UP((vm_low[type] + 7) / 8);
+ vm_top[type] = nr * PAGE_SIZE * 8;
+
+ for ( i = 0, va = (unsigned long)vm_bitmap(type); i < nr; ++i, va += PAGE_SIZE )
{
struct page_info *pg = alloc_domheap_page(NULL, 0);
map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR);
clear_page((void *)va);
}
- bitmap_fill(vm_bitmap, vm_low);
+ bitmap_fill(vm_bitmap(type), vm_low[type]);
/* Populate page tables for the bitmap if necessary. */
- populate_pt_range(va, 0, vm_low - nr);
+ populate_pt_range(va, 0, vm_low[type] - nr);
}
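To make the sizing above concrete: vm_end[type] is the number of pages (one
bitmap bit each) in the region, vm_low[type] is how many leading pages the
bitmap itself occupies, and only nr bitmap pages are mapped eagerly, so
vm_top[type] starts well below vm_end[type] and grows on demand in vm_alloc().
A standalone sketch of the arithmetic, assuming 4 KiB pages, a 64-bit build,
and a hypothetical 64 GiB region:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PFN_DOWN(x) ((x) / PAGE_SIZE)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
    unsigned long region = 64UL << 30;               /* 64 GiB */
    unsigned long vm_end = PFN_DOWN(region);         /* bits needed */
    unsigned long vm_low = PFN_UP((vm_end + 7) / 8); /* pages the bitmap occupies */
    unsigned long nr     = PFN_UP((vm_low + 7) / 8); /* bitmap pages mapped now */
    unsigned long vm_top = nr * PAGE_SIZE * 8;       /* bits usable before growing */

    /* Prints: vm_end=16777216 vm_low=512 nr=1 vm_top=32768 */
    printf("vm_end=%lu vm_low=%lu nr=%lu vm_top=%lu\n",
           vm_end, vm_low, nr, vm_top);
    return 0;
}

So a single bitmap page initially tracks the first 32768 pages (128 MiB) of
the region; bitmap_fill() then marks the bitmap's own 512 pages as allocated,
and populate_pt_range() pre-builds page tables for the not-yet-mapped
remainder of the bitmap so vm_alloc() can extend it later.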
-void *vm_alloc(unsigned int nr, unsigned int align)
+static void *vm_alloc(unsigned int nr, unsigned int align,
+ enum vmap_region t)
{
unsigned int start, bit;
@@ -52,27 +55,31 @@ void *vm_alloc(unsigned int nr, unsigned int align)
else if ( align & (align - 1) )
align &= -align;
+ ASSERT((t >= VMAP_DEFAULT) && (t < VMAP_REGION_NR));
+ if ( !vm_base[t] )
+ return NULL;
+
spin_lock(&vm_lock);
for ( ; ; )
{
struct page_info *pg;
- ASSERT(vm_low == vm_top || !test_bit(vm_low, vm_bitmap));
- for ( start = vm_low; start < vm_top; )
+ ASSERT(vm_low[t] == vm_top[t] || !test_bit(vm_low[t], vm_bitmap(t)));
+ for ( start = vm_low[t]; start < vm_top[t]; )
{
- bit = find_next_bit(vm_bitmap, vm_top, start + 1);
- if ( bit > vm_top )
- bit = vm_top;
+ bit = find_next_bit(vm_bitmap(t), vm_top[t], start + 1);
+ if ( bit > vm_top[t] )
+ bit = vm_top[t];
/*
* Note that this skips the first bit, making the
* corresponding page a guard one.
*/
start = (start + align) & ~(align - 1);
- if ( bit < vm_top )
+ if ( bit < vm_top[t] )
{
if ( start + nr < bit )
break;
- start = find_next_zero_bit(vm_bitmap, vm_top, bit + 1);
+ start = find_next_zero_bit(vm_bitmap(t), vm_top[t], bit + 1);
}
else
{
@@ -82,12 +89,12 @@ void *vm_alloc(unsigned int nr, unsigned int align)
}
}
- if ( start < vm_top )
+ if ( start < vm_top[t] )
break;
spin_unlock(&vm_lock);
- if ( vm_top >= vm_end )
+ if ( vm_top[t] >= vm_end[t] )
return NULL;
pg = alloc_domheap_page(NULL, 0);
@@ -96,23 +103,23 @@ void *vm_alloc(unsigned int nr, unsigned int align)
spin_lock(&vm_lock);
- if ( start >= vm_top )
+ if ( start >= vm_top[t] )
{
- unsigned long va = (unsigned long)vm_bitmap + vm_top / 8;
+ unsigned long va = (unsigned long)vm_bitmap(t) + vm_top[t] / 8;
if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
{
clear_page((void *)va);
- vm_top += PAGE_SIZE * 8;
- if ( vm_top > vm_end )
- vm_top = vm_end;
+ vm_top[t] += PAGE_SIZE * 8;
+ if ( vm_top[t] > vm_end[t] )
+ vm_top[t] = vm_end[t];
continue;
}
}
free_domheap_page(pg);
- if ( start >= vm_top )
+ if ( start >= vm_top[t] )
{
spin_unlock(&vm_lock);
return NULL;
@@ -120,47 +127,58 @@ void *vm_alloc(unsigned int nr, unsigned int align)
}
for ( bit = start; bit < start + nr; ++bit )
- __set_bit(bit, vm_bitmap);
- if ( bit < vm_top )
- ASSERT(!test_bit(bit, vm_bitmap));
+ __set_bit(bit, vm_bitmap(t));
+ if ( bit < vm_top[t] )
+ ASSERT(!test_bit(bit, vm_bitmap(t)));
else
- ASSERT(bit == vm_top);
- if ( start <= vm_low + 2 )
- vm_low = bit;
+ ASSERT(bit == vm_top[t]);
+ if ( start <= vm_low[t] + 2 )
+ vm_low[t] = bit;
spin_unlock(&vm_lock);
- return vm_base + start * PAGE_SIZE;
+ return vm_base[t] + start * PAGE_SIZE;
}
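One subtlety in the scan above: the rounding expression
(start + align) & ~(align - 1) always advances past the current clear bit,
even when start is already aligned, so every allocation keeps at least one
clear bit (one permanently unmapped guard page) in front of it, as the
in-code comment notes. A toy sketch of just that expression, not Xen code:

#include <stdio.h>

static unsigned int bump(unsigned int start, unsigned int align)
{
    return (start + align) & ~(align - 1); /* round up AND move forward */
}

int main(void)
{
    /* Prints: 6 8 12; note the already-aligned start 8 still advances. */
    printf("%u %u %u\n", bump(5, 1), bump(5, 4), bump(8, 4));
    return 0;
}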
-static unsigned int vm_index(const void *va)
+static unsigned int vm_index(const void *va, enum vmap_region type)
{
unsigned long addr = (unsigned long)va & ~(PAGE_SIZE - 1);
unsigned int idx;
+ unsigned long start = (unsigned long)vm_base[type];
- if ( addr < VMAP_VIRT_START + (vm_end / 8) ||
- addr >= VMAP_VIRT_START + vm_top * PAGE_SIZE )
+ if ( !start )
return 0;
- idx = PFN_DOWN(va - vm_base);
- return !test_bit(idx - 1, vm_bitmap) &&
- test_bit(idx, vm_bitmap) ? idx : 0;
+ if ( addr < start + (vm_end[type] / 8) ||
+ addr >= start + vm_top[type] * PAGE_SIZE )
+ return 0;
+
+ idx = PFN_DOWN(va - vm_base[type]);
+ return !test_bit(idx - 1, vm_bitmap(type)) &&
+ test_bit(idx, vm_bitmap(type)) ? idx : 0;
}
-static unsigned int vm_size(const void *va)
+static unsigned int vm_size(const void *va, enum vmap_region type)
{
- unsigned int start = vm_index(va), end;
+ unsigned int start = vm_index(va, type), end;
if ( !start )
return 0;
- end = find_next_zero_bit(vm_bitmap, vm_top, start + 1);
+ end = find_next_zero_bit(vm_bitmap(type), vm_top[type], start + 1);
- return min(end, vm_top) - start;
+ return min(end, vm_top[type]) - start;
}
-void vm_free(const void *va)
+static void vm_free(const void *va)
{
- unsigned int bit = vm_index(va);
+ enum vmap_region type = VMAP_DEFAULT;
+ unsigned int bit = vm_index(va, type);
+
+ if ( !bit )
+ {
+ type = VMAP_XEN;
+ bit = vm_index(va, type);
+ }
if ( !bit )
{
@@ -169,29 +187,55 @@ void vm_free(const void *va)
}
spin_lock(&vm_lock);
- if ( bit < vm_low )
+ if ( bit < vm_low[type] )
{
- vm_low = bit - 1;
- while ( !test_bit(vm_low - 1, vm_bitmap) )
- --vm_low;
+ vm_low[type] = bit - 1;
+ while ( !test_bit(vm_low[type] - 1, vm_bitmap(type)) )
+ --vm_low[type];
}
- while ( __test_and_clear_bit(bit, vm_bitmap) )
- if ( ++bit == vm_top )
+ while ( __test_and_clear_bit(bit, vm_bitmap(type)) )
+ if ( ++bit == vm_top[type] )
break;
spin_unlock(&vm_lock);
}
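The vm_low walk-back above is easy to misread: when the freed run starts
below the low-water mark, vm_low is first pulled to bit - 1 (the run's guard
page, which was never set) and then slid further down across any older clear
bits, restoring the "lowest known clear bit" hint that vm_alloc()'s scan
starts from. A toy trace under an assumed bitmap state, not Xen code:

#include <stdio.h>

static int test_bit(unsigned int b, unsigned long m) { return (m >> b) & 1; }

int main(void)
{
    unsigned long map = 0x0e1fUL; /* bits 0-4 and 9-11 set; bit 8 is the guard */
    unsigned int bit = 9, vm_low = 12, vm_top = 16;

    if ( bit < vm_low )
    {
        vm_low = bit - 1;              /* reclaim the guard page as well */
        while ( !test_bit(vm_low - 1, map) )
            --vm_low;                  /* slide across older clear bits 5-7 */
    }
    while ( test_bit(bit, map) )       /* now clear the freed run itself */
    {
        map &= ~(1UL << bit);
        if ( ++bit == vm_top )
            break;
    }

    printf("vm_low=%u map=%#lx\n", vm_low, map); /* vm_low=5 map=0x1f */
    return 0;
}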
+static void vunmap_pages(const void *va, unsigned int pages)
+{
+#ifndef _PAGE_NONE
+ unsigned long addr = (unsigned long)va;
+
+ destroy_xen_mappings(addr, addr + PAGE_SIZE * pages);
+#else /* Avoid tearing down intermediate page tables. */
+ map_pages_to_xen((unsigned long)va, 0, pages, _PAGE_NONE);
+#endif
+ vm_free(va);
+}
+
+void vunmap(const void *va)
+{
+ enum vmap_region type = VMAP_DEFAULT;
+ unsigned int pages = vm_size(va, type);
+
+ if ( !pages )
+ {
+ type = VMAP_XEN;
+ pages = vm_size(va, type);
+ }
+ vunmap_pages(va, pages);
+}
+
void *__vmap(const mfn_t *mfn, unsigned int granularity,
- unsigned int nr, unsigned int align, unsigned int flags)
+ unsigned int nr, unsigned int align, unsigned int flags,
+ enum vmap_region type)
{
- void *va = vm_alloc(nr * granularity, align);
+ void *va = vm_alloc(nr * granularity, align, type);
unsigned long cur = (unsigned long)va;
for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
{
if ( map_pages_to_xen(cur, mfn_x(*mfn), granularity, flags) )
{
- vunmap(va);
+ vunmap_pages(va, vm_size(va, type));
va = NULL;
}
}
@@ -201,22 +245,10 @@ void *__vmap(const mfn_t *mfn, unsigned int granularity,
void *vmap(const mfn_t *mfn, unsigned int nr)
{
- return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR);
-}
-
-void vunmap(const void *va)
-{
-#ifndef _PAGE_NONE
- unsigned long addr = (unsigned long)va;
-
- destroy_xen_mappings(addr, addr + PAGE_SIZE * vm_size(va));
-#else /* Avoid tearing down intermediate page tables. */
- map_pages_to_xen((unsigned long)va, 0, vm_size(va), _PAGE_NONE);
-#endif
- vm_free(va);
+ return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
}
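For reference, a hedged caller sketch for the unchanged vmap()/vunmap() pair,
with made-up frame numbers; note that vunmap() now works out by itself which
region the address came from:

    mfn_t frames[2] = { _mfn(0x1000), _mfn(0x1001) }; /* hypothetical MFNs */
    void *p = vmap(frames, 2);

    if ( p )
        vunmap(p); /* probes VMAP_DEFAULT first, then VMAP_XEN */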
-void *vmalloc(size_t size)
+static void *vmalloc_type(size_t size, enum vmap_region type)
{
mfn_t *mfn;
size_t pages, i;
@@ -238,7 +270,7 @@ void *vmalloc(size_t size)
mfn[i] = _mfn(page_to_mfn(pg));
}
- va = vmap(mfn, pages);
+ va = __vmap(mfn, 1, pages, 1, PAGE_HYPERVISOR, type);
if ( va == NULL )
goto error;
@@ -252,9 +284,19 @@ void *vmalloc(size_t size)
return NULL;
}
-void *vzalloc(size_t size)
+void *vmalloc(size_t size)
{
- void *p = vmalloc(size);
+ return vmalloc_type(size, VMAP_DEFAULT);
+}
+
+void *vmalloc_xen(size_t size)
+{
+ return vmalloc_type(size, VMAP_XEN);
+}
+
+static void *vzalloc_type(size_t size, enum vmap_region type)
+{
+ void *p = vmalloc_type(size, type);
int i;
if ( p == NULL )
@@ -266,16 +308,32 @@ void *vzalloc(size_t size)
return p;
}
+void *vzalloc(size_t size)
+{
+ return vzalloc_type(size, VMAP_DEFAULT);
+}
+
+void *vzalloc_xen(size_t size)
+{
+ return vzalloc_type(size, VMAP_XEN);
+}
+
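Usage-wise, the new _xen variants differ only in which region backs the
returned addresses; freeing stays uniform because vfree() below (like
vunmap()) probes both regions. A hedged sketch of a hypothetical caller
wanting zeroed space in the VMAP_XEN range:

    void *payload = vzalloc_xen(3 * PAGE_SIZE); /* NULL until an arch has
                                                   registered VMAP_XEN */
    if ( payload )
    {
        /* ... populate payload ... */
        vfree(payload);
    }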
void vfree(void *va)
{
unsigned int i, pages;
struct page_info *pg;
PAGE_LIST_HEAD(pg_list);
+ enum vmap_region type = VMAP_DEFAULT;
if ( !va )
return;
- pages = vm_size(va);
+ pages = vm_size(va, type);
+ if ( !pages )
+ {
+ type = VMAP_XEN;
+ pages = vm_size(va, type);
+ }
ASSERT(pages);
for ( i = 0; i < pages; i++ )
@@ -285,7 +343,7 @@ void vfree(void *va)
ASSERT(page);
page_list_add(page, &pg_list);
}
- vunmap(va);
+ vunmap_pages(va, pages);
while ( (pg = page_list_remove_head(&pg_list)) != NULL )
free_domheap_page(pg);
@@ -97,7 +97,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
if (IS_ENABLED(CONFIG_X86) && !((phys + size - 1) >> 20))
return __va(phys);
return __vmap(&mfn, PFN_UP(offs + size), 1, 1,
- ACPI_MAP_MEM_ATTR) + offs;
+ ACPI_MAP_MEM_ATTR, VMAP_DEFAULT) + offs;
}
return __acpi_map_table(phys, size);
}
@@ -4,15 +4,24 @@
#include <xen/mm.h>
#include <asm/page.h>
-void *vm_alloc(unsigned int nr, unsigned int align);
-void vm_free(const void *);
+enum vmap_region {
+ VMAP_DEFAULT,
+ VMAP_XEN,
+ VMAP_REGION_NR,
+};
-void *__vmap(const mfn_t *mfn, unsigned int granularity,
- unsigned int nr, unsigned int align, unsigned int flags);
+void vm_init_type(enum vmap_region type, void *start, void *end);
+
+void *__vmap(const mfn_t *mfn, unsigned int granularity, unsigned int nr,
+ unsigned int align, unsigned int flags, enum vmap_region);
void *vmap(const mfn_t *mfn, unsigned int nr);
void vunmap(const void *);
+
void *vmalloc(size_t size);
+void *vmalloc_xen(size_t size);
+
void *vzalloc(size_t size);
+void *vzalloc_xen(size_t size);
void vfree(void *va);
void __iomem *ioremap(paddr_t, size_t);
@@ -24,7 +33,10 @@ static inline void iounmap(void __iomem *va)
vunmap((void *)(addr & PAGE_MASK));
}
-void vm_init(void);
void *arch_vmap_virt_end(void);
+static inline void vm_init(void)
+{
+ vm_init_type(VMAP_DEFAULT, (void *)VMAP_VIRT_START, arch_vmap_virt_end());
+}
#endif /* __XEN_VMAP_H__ */
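Note that the patch itself only wires up VMAP_DEFAULT, via the vm_init()
wrapper above; registering the second region is left to the architecture. A
speculative sketch of such a boot-time hook, with an entirely made-up name
and size:

/* Hypothetical arch code: hand a 2 MiB range near the Xen image to the
 * VMAP_XEN region. The real bounds would come from the arch memory layout. */
static void __init arch_vmap_xen_init(unsigned long start)
{
    vm_init_type(VMAP_XEN, (void *)start, (void *)(start + MB(2)));
}

Once vm_base[VMAP_XEN] is non-NULL, vmalloc_xen() and vzalloc_xen() start
succeeding, and vunmap()/vfree() transparently handle addresses from either
region.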