diff mbox

[kvm-unit-tests,v2,09/12] page_alloc: add yet another memalign

Message ID 20180117104005.29211-10-drjones@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Andrew Jones Jan. 17, 2018, 10:40 a.m. UTC
If we want both early alloc ops and alloc_page(), then it's best
to just give all the memory to page_alloc and then base the
early alloc ops on that.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/alloc_page.c | 31 +++++++++++++++++++++++++++++++
 lib/alloc_page.h |  1 +
 2 files changed, 32 insertions(+)
diff mbox

Patch

diff --git a/lib/alloc_page.c b/lib/alloc_page.c
index ca11496829a0..361b584cab2a 100644
--- a/lib/alloc_page.c
+++ b/lib/alloc_page.c
@@ -5,7 +5,9 @@ 
  * with page granularity.
  */
 #include "libcflat.h"
+#include "alloc.h"
 #include "alloc_phys.h"
+#include "bitops.h"
 #include <asm/page.h>
 #include <asm/io.h>
 #include <asm/spinlock.h>
@@ -134,3 +136,32 @@  void free_page(void *page)
 	freelist = page;
 	spin_unlock(&lock);
 }
+
+static void *page_memalign(size_t alignment, size_t size)
+{
+	unsigned long n = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+	unsigned long order;
+
+	if (!size)
+		return NULL;
+
+	order = is_power_of_2(n) ? fls(n) : fls(n) + 1;
+
+	return alloc_pages(order);
+}
+
/*
 * alloc_ops free hook: return @mem to the page allocator.
 * @size must match the size of the original allocation; free_pages()
 * uses it to determine how many pages to put back on the freelist.
 */
static void page_free(void *mem, size_t size)
{
	free_pages(mem, size);
}
+
/*
 * Generic alloc ops backed by the page allocator.  align_min is
 * PAGE_SIZE because alloc_pages() cannot provide finer alignment.
 */
static struct alloc_ops page_alloc_ops = {
	.memalign = page_memalign,
	.free = page_free,
	.align_min = PAGE_SIZE,
};
+
/*
 * Route the generic allocator (the global alloc_ops from alloc.h)
 * through the page allocator.  Call this once all memory has been
 * handed to page_alloc; earlier alloc ops are no longer used after
 * this point.
 */
void page_alloc_ops_enable(void)
{
	alloc_ops = &page_alloc_ops;
}
diff --git a/lib/alloc_page.h b/lib/alloc_page.h
index 1c2b3ec9add6..51d48414a47e 100644
--- a/lib/alloc_page.h
+++ b/lib/alloc_page.h
@@ -9,6 +9,7 @@ 
 #define ALLOC_PAGE_H 1
 
 bool page_alloc_initialized(void);
+void page_alloc_ops_enable(void);
 void *alloc_page();
 void *alloc_pages(unsigned long order);
 void free_page(void *page);