diff --git a/lib/x86/asm/page.h b/lib/x86/asm/page.h
--- a/lib/x86/asm/page.h
+++ b/lib/x86/asm/page.h
@@ -16,6 +16,8 @@
 #ifndef __ASSEMBLY__
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
 #ifdef __x86_64__
 #define LARGE_PAGE_SIZE (512 * PAGE_SIZE)
 #else
diff --git a/lib/x86/vm.c b/lib/x86/vm.c
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -1,37 +1,57 @@
 #include "fwcfg.h"
 #include "vm.h"
 #include "libcflat.h"
+#include "asm/spinlock.h"
+static struct spinlock heap_lock;
+static struct spinlock vm_lock;
 static void *free = 0;
 static void *vfree_top = 0;
 static void free_memory(void *mem, unsigned long size)
 {
+	assert(!((unsigned long)mem & ~PAGE_MASK));
+
+	spin_lock(&heap_lock);
+
+	free = NULL;
+
 	while (size >= PAGE_SIZE) {
 		*(void **)mem = free;
 		free = mem;
 		mem += PAGE_SIZE;
 		size -= PAGE_SIZE;
 	}
+
+	spin_unlock(&heap_lock);
 }
 void *alloc_page()
 {
 	void *p;
-	if (!free)
-		return 0;
+	spin_lock(&heap_lock);
+
+	if (!free) {
+		spin_unlock(&heap_lock);
+		return NULL;
+	}
 	p = free;
 	free = *(void **)free;
+	spin_unlock(&heap_lock);
+
 	return p;
 }
 void free_page(void *page)
 {
+	spin_lock(&heap_lock);
 	*(void **)page = free;
 	free = page;
+	spin_unlock(&heap_lock);
 }
 extern char edata;
@@ -162,11 +179,13 @@ void *vmalloc(unsigned long size)
 	void *mem, *p;
 	unsigned pages;
-	size += sizeof(unsigned long);
-	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+	size = PAGE_ALIGN(size + sizeof(unsigned long));
+	spin_lock(&vm_lock);
 	vfree_top -= size;
 	mem = p = vfree_top;
+	spin_unlock(&vm_lock);
+
 	pages = size / PAGE_SIZE;
 	while (pages--) {
 		install_page(phys_to_virt(read_cr3()), virt_to_phys(alloc_page()), p);
@@ -179,12 +198,18 @@ void *vmalloc(unsigned long size)
 uint64_t virt_to_phys_cr3(void *mem)
 {
-	return (*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK) + ((ulong)mem & (PAGE_SIZE - 1));
+	return (*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK) + ((ulong)mem & ~PAGE_MASK);
 }
 void vfree(void *mem)
 {
-	unsigned long size = ((unsigned long *)mem)[-1];
+	unsigned long size;
+
+	if (mem == NULL)
+		return;
+
+	mem -= sizeof(unsigned long);
+	size = *(unsigned long *)mem;
 	while (size) {
 		free_page(phys_to_virt(*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK));
@@ -198,11 +223,14 @@ void *vmap(unsigned long long phys, unsigned long size)
 	void *mem, *p;
 	unsigned pages;
-	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
-	vfree_top -= size;
+	size = PAGE_ALIGN(size);
 	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
+	spin_lock(&vm_lock);
+	vfree_top -= size;
 	mem = p = vfree_top;
+	spin_unlock(&vm_lock);
+
 	pages = size / PAGE_SIZE;
 	while (pages--) {
 		install_page(phys_to_virt(read_cr3()), phys, p);
@@ -214,7 +242,12 @@ void *vmap(unsigned long long phys, unsigned long size)
 void *alloc_vpages(ulong nr)
 {
-	vfree_top -= PAGE_SIZE * nr;
-	return vfree_top;
+	void *p;
+
+	spin_lock(&vm_lock);
+	vfree_top -= PAGE_SIZE * nr;
+	p = vfree_top;
+	spin_unlock(&vm_lock);
+	return p;
 }
Ensure we're page aligned, add locking, just return if NULL is passed
to vfree().

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/x86/asm/page.h |  2 ++
 lib/x86/vm.c       | 44 +++++++++++++++++++++++++++++++++++++-------
 2 files changed, 39 insertions(+), 7 deletions(-)