@@ -108,9 +108,13 @@ struct page_info
/* Page is Xen heap? */
#define _PGC_xen_heap PG_shift(2)
#define PGC_xen_heap PG_mask(1, 2)
- /* Page is static memory */
+#ifdef CONFIG_STATIC_MEMORY
+/* Page is static memory */
#define _PGC_static PG_shift(3)
#define PGC_static PG_mask(1, 3)
+#else
+#define PGC_static 0
+#endif
/* ... */
/* Page is broken? */
#define _PGC_broken PG_shift(7)
@@ -1443,6 +1443,13 @@ static void free_heap_pages(
ASSERT(order <= MAX_ORDER);
+ if ( unlikely(pg->count_info & PGC_static) )
+ {
+ /* Pages of static memory shall not go back to the heap. */
+ free_staticmem_pages(pg, 1UL << order, need_scrub);
+ return;
+ }
+
spin_lock(&heap_lock);
for ( i = 0; i < (1 << order); i++ )
@@ -2636,12 +2643,14 @@ struct domain *get_pg_owner(domid_t domid)
#ifdef CONFIG_STATIC_MEMORY
/* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
- bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+ bool need_scrub)
{
mfn_t mfn = page_to_mfn(pg);
unsigned long i;
+ spin_lock(&heap_lock);
+
for ( i = 0; i < nr_mfns; i++ )
{
mark_page_free(&pg[i], mfn_add(mfn, i));
@@ -2652,9 +2661,10 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
-            scrub_one_page(pg);
+            scrub_one_page(&pg[i]);
}
- /* In case initializing page of static memory, mark it PGC_static. */
pg[i].count_info |= PGC_static;
}
+
+ spin_unlock(&heap_lock);
}
/*
@@ -85,13 +85,11 @@ bool scrub_free_pages(void);
} while ( false )
#define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
-#ifdef CONFIG_STATIC_MEMORY
/* These functions are for static memory */
void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
bool need_scrub);
int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
unsigned int memflags);
-#endif
/* Map machine page range in Xen virtual address space. */
int map_pages_to_xen(