[kvm-unit-tests,v1,8/8] lib/vmalloc: add locking and a check for initialization

Message ID 20200622162141.279716-9-imbrenda@linux.ibm.com (mailing list archive)
State New, archived
Series Minor fixes, improvements, and cleanup

Commit Message

Claudio Imbrenda June 22, 2020, 4:21 p.m. UTC
Make sure init_alloc_vpage is never called when vmalloc is in use.

Make both init_alloc_vpage and setup_vm take the lock.

For setup_vm, take the lock only around the final check and the switch of
alloc_ops, because on at least some architectures setup_mmu can call
init_alloc_vpage, which takes the same lock and would therefore deadlock.

Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
---
 lib/vmalloc.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

Patch

diff --git a/lib/vmalloc.c b/lib/vmalloc.c
index 83e34aa..10f15af 100644
--- a/lib/vmalloc.c
+++ b/lib/vmalloc.c
@@ -37,11 +37,6 @@  void *alloc_vpage(void)
 	return alloc_vpages(1);
 }
 
-void init_alloc_vpage(void *top)
-{
-	vfree_top = top;
-}
-
 void *vmap(phys_addr_t phys, size_t size)
 {
 	void *mem, *p;
@@ -96,6 +91,14 @@  void __attribute__((__weak__)) find_highmem(void)
 {
 }
 
+void init_alloc_vpage(void *top)
+{
+	spin_lock(&lock);
+	assert(alloc_ops != &vmalloc_ops);
+	vfree_top = top;
+	spin_unlock(&lock);
+}
+
 void setup_vm()
 {
 	phys_addr_t base, top;
@@ -124,5 +127,8 @@  void setup_vm()
 		free_pages(phys_to_virt(base), top - base);
 	}
 
+	spin_lock(&lock);
+	assert(alloc_ops != &vmalloc_ops);
 	alloc_ops = &vmalloc_ops;
+	spin_unlock(&lock);
 }
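
For reference, a minimal, self-contained sketch of the lock ordering the commit
message describes: because the lock is not recursive and setup_mmu() may end up
calling init_alloc_vpage(), setup_vm() takes the lock only around the final
switch of alloc_ops. This is plain standalone C, not kvm-unit-tests code; the
spin_lock/spin_unlock implementation, the dummy vmalloc_ops marker, the
setup_mmu() stand-in and main() below are illustrative assumptions, not the
real lib/vmalloc.c definitions.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;

static void spin_lock(atomic_flag *l)
{
	/* busy-wait; a second acquisition on the same path would spin forever */
	while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
		;
}

static void spin_unlock(atomic_flag *l)
{
	atomic_flag_clear_explicit(l, memory_order_release);
}

static void *vfree_top;
static const void *alloc_ops;			/* NULL until vmalloc takes over */
static const void *vmalloc_ops = &vmalloc_ops;	/* dummy marker for "vmalloc in use" */

/* Must never be called once vmalloc is already in use. */
static void init_alloc_vpage(void *top)
{
	spin_lock(&lock);
	assert(alloc_ops != &vmalloc_ops);
	vfree_top = top;
	spin_unlock(&lock);
}

/* Stand-in for an arch setup_mmu() that calls init_alloc_vpage(). */
static void setup_mmu(void)
{
	init_alloc_vpage((void *)0x40000000);
}

static void setup_vm(void)
{
	/*
	 * Do NOT take the lock here: setup_mmu() -> init_alloc_vpage()
	 * takes the same lock, and it is not recursive.
	 */
	setup_mmu();

	/* Lock only around the final switch to the vmalloc allocator. */
	spin_lock(&lock);
	assert(alloc_ops != &vmalloc_ops);
	alloc_ops = &vmalloc_ops;
	spin_unlock(&lock);
}

int main(void)
{
	setup_vm();
	printf("vfree_top=%p, vmalloc now in use\n", vfree_top);
	return 0;
}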