@@ -19,6 +19,10 @@ static unsigned int __read_mostly vm_end;
/* lowest known clear bit in the bitmap */
static unsigned int vm_low;
+/* Tracks vmalloc_range() areas, which live outside the vm_bitmap range. */
+static LIST_HEAD(vm_area_list);
+
+/* Protects vm_area_list. */
+static DEFINE_SPINLOCK(vm_area_lock);
+
void __init vm_init(void)
{
unsigned int i, nr;
@@ -146,12 +150,34 @@ static unsigned int vm_index(const void *va)
test_bit(idx, vm_bitmap) ? idx : 0;
}
+/*
+ * Look up the vm_area tracking the given virtual address, or NULL if the
+ * address is not the start of a registered area.
+ *
+ * NOTE(review): the lock is dropped before the pointer is returned, so the
+ * caller must ensure the area cannot be freed concurrently — confirm the
+ * callers' locking discipline.
+ */
+static const struct vm_area *vm_find(const void *va)
+{
+    const struct vm_area *iter;
+
+    spin_lock(&vm_area_lock);
+    list_for_each_entry(iter, &vm_area_list, list)
+        if ( iter->va == va )
+        {
+            spin_unlock(&vm_area_lock);
+            return iter;
+        }
+    spin_unlock(&vm_area_lock);
+
+    return NULL;
+}
+
static unsigned int vm_size(const void *va)
{
unsigned int start = vm_index(va), end;
if ( !start )
+ {
+ const struct vm_area *vm = vm_find(va);
+ if ( vm )
+ return vm->pages;
return 0;
+ }
end = find_next_zero_bit(vm_bitmap, vm_top, start + 1);
@@ -164,6 +190,17 @@ void vm_free(const void *va)
if ( !bit )
{
+ struct vm_area *vm = (struct vm_area *)vm_find(va);
+
+ if ( vm )
+ {
+ spin_lock(&vm_area_lock);
+ list_del(&vm->list);
+ spin_unlock(&vm_area_lock);
+ xfree(vm->mfn);
+ xfree(vm);
+ return;
+ }
WARN_ON(va != NULL);
return;
}
@@ -199,6 +236,23 @@ void *__vmap(const mfn_t *mfn, unsigned int granularity,
return va;
}
+/*
+ * Map @nr frames from @mfn contiguously starting at virtual address @va,
+ * one page at a time with PAGE_HYPERVISOR permissions.
+ *
+ * Returns 1 on success.  On failure (including @va == 0) any mappings
+ * already established by this call are torn down and 0 is returned, so the
+ * caller only needs to free the underlying pages.
+ */
+static bool_t vmap_range(const mfn_t *mfn, unsigned long va, unsigned int nr)
+{
+    unsigned long cur = va;
+
+    /*
+     * Previously a zero @va made the loop a no-op and reported success,
+     * leaving the caller believing @nr pages were mapped.  Fail explicitly
+     * instead.
+     */
+    if ( !va )
+        return 0;
+
+    while ( nr-- )
+    {
+        if ( map_pages_to_xen(cur, mfn_x(*mfn), 1, PAGE_HYPERVISOR) )
+        {
+            /* Undo the partial mapping [va, cur) before reporting failure. */
+            if ( cur != va )
+                destroy_xen_mappings(va, cur);
+            return 0;
+        }
+        ++mfn;
+        cur += PAGE_SIZE;
+    }
+
+    return 1;
+}
+
void *vmap(const mfn_t *mfn, unsigned int nr)
{
return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR);
@@ -216,6 +270,56 @@ void vunmap(const void *va)
vm_free(va);
}
+/*
+ * Allocate enough domheap pages to back @size bytes and map them at the
+ * fixed virtual address @start, registering the result on vm_area_list.
+ *
+ * Returns the new vm_area on success (ownership stays with the list), or
+ * NULL on allocation/mapping failure, in which case nothing is leaked:
+ * pages allocated so far are returned to the heap and vmap_range() has
+ * already torn down any partial mappings.
+ */
+struct vm_area *vmalloc_range(size_t size, unsigned long start)
+{
+    struct vm_area *vm;
+    mfn_t *mfns;
+    size_t nr, i;
+
+    ASSERT(size);
+
+    nr = PFN_UP(size);
+    mfns = xmalloc_array(mfn_t, nr);
+    if ( !mfns )
+        return NULL;
+
+    vm = xmalloc(struct vm_area);
+    if ( !vm )
+    {
+        xfree(mfns);
+        return NULL;
+    }
+    vm->mfn = mfns;
+
+    /* Back the area one page at a time, recording each frame number. */
+    for ( i = 0; i < nr; i++ )
+    {
+        struct page_info *pg = alloc_domheap_page(NULL, 0);
+
+        if ( !pg )
+            goto error;
+        mfns[i] = _mfn(page_to_mfn(pg));
+    }
+
+    if ( !vmap_range(mfns, start, nr) )
+        goto error;
+
+    vm->va = (void *)start;
+    vm->pages = nr;
+
+    spin_lock(&vm_area_lock);
+    list_add(&vm->list, &vm_area_list);
+    spin_unlock(&vm_area_lock);
+
+    return vm;
+
+ error:
+    /* i is the count of successfully allocated pages; free them all. */
+    while ( i-- )
+        free_domheap_page(mfn_to_page(mfn_x(mfns[i])));
+    xfree(mfns);
+    xfree(vm);
+    return NULL;
+}
+
void *vmalloc(size_t size)
{
mfn_t *mfn;
@@ -12,6 +12,16 @@ void *__vmap(const mfn_t *mfn, unsigned int granularity,
void *vmap(const mfn_t *mfn, unsigned int nr);
void vunmap(const void *);
void *vmalloc(size_t size);
+
+/* Tracks one vmalloc_range() allocation; linked on a module-private list. */
+struct vm_area {
+    struct list_head list;  /* membership in the module's area list */
+    mfn_t *mfn;             /* xmalloc'd array of the backing frame numbers */
+    void *va;               /* start virtual address of the mapped range */
+    unsigned int pages;     /* number of mapped pages */
+};
+
+struct vm_area *vmalloc_range(size_t size, unsigned long start);
+
void *vzalloc(size_t size);
void vfree(void *va);