[PATCHv1 2/4] x86/domain: Compile with lock_profile=y enabled.

Message ID 1450447746-9305-3-git-send-email-david.vrabel@citrix.com (mailing list archive)
State New, archived

Commit Message

David Vrabel Dec. 18, 2015, 2:09 p.m. UTC
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Our 'struct domain', when lock profiling is enabled, is bigger than
one page.

We can't use vmap or vzalloc, as both of those stash the physical
address in struct page, which violates assumptions made in
'arch_init_memory' and trips its ASSERTs.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
 xen/arch/x86/domain.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 2c3bb09..40d9d7c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -231,6 +231,7 @@ static unsigned int __init noinline _domain_struct_bits(void)
 struct domain *alloc_domain_struct(void)
 {
     struct domain *d;
+    unsigned int order = get_order_from_bytes(sizeof(*d));
 #ifdef CONFIG_BIGMEM
     const unsigned int bits = 0;
 #else
@@ -244,10 +245,18 @@ struct domain *alloc_domain_struct(void)
          bits = _domain_struct_bits();
 #endif
 
+
+#ifndef LOCK_PROFILE
     BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
-    d = alloc_xenheap_pages(0, MEMF_bits(bits));
+#endif
+    d = alloc_xenheap_pages(order, MEMF_bits(bits));
     if ( d != NULL )
-        clear_page(d);
+    {
+        unsigned int sz;
+
+        for ( sz = 0; sz < (PAGE_SIZE << order); sz += PAGE_SIZE )
+            clear_page((void *)d + sz);
+    }
     return d;
 }
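
For readers following along without a Xen tree, the sketch below models
the allocation logic the patch switches to. It is only an illustration:
get_order_from_bytes() is reimplemented here to match its Xen
definition, and aligned_alloc()/memset() stand in for
alloc_xenheap_pages()/clear_page().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Matches Xen's get_order_from_bytes(): the smallest order such that
 * (PAGE_SIZE << order) >= size. */
static unsigned int get_order_from_bytes(size_t size)
{
    unsigned int order;

    size = (size - 1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;

    return order;
}

int main(void)
{
    /* Stand-in for sizeof(struct domain) with lock profiling on:
     * anything larger than PAGE_SIZE forces order > 0. */
    size_t size = PAGE_SIZE + 512;
    unsigned int order = get_order_from_bytes(size);
    unsigned char *d = aligned_alloc(PAGE_SIZE, PAGE_SIZE << order);

    if ( d != NULL )
    {
        size_t sz;

        /* Clear every allocated page, not just the first one,
         * mirroring the loop the patch adds. */
        for ( sz = 0; sz < (PAGE_SIZE << order); sz += PAGE_SIZE )
            memset(d + sz, 0, PAGE_SIZE);
    }

    printf("size=%zu -> order=%u, %lu bytes allocated\n",
           size, order, PAGE_SIZE << order);
    free(d);
    return 0;
}

With lock profiling enabled, sizeof(struct domain) exceeds PAGE_SIZE,
so the order becomes non-zero (typically 1) and every allocated page is
cleared; without it, the order stays 0, the BUILD_BUG_ON still applies,
and behaviour matches the old single-page path.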