@@ -121,9 +121,13 @@ struct page_info
/* Page is Xen heap? */
#define _PGC_xen_heap PG_shift(2)
#define PGC_xen_heap PG_mask(1, 2)
- /* Page is static memory */
+#ifdef CONFIG_STATIC_MEMORY
+/* Page is static memory */
#define _PGC_static PG_shift(3)
#define PGC_static PG_mask(1, 3)
+#else
+#define PGC_static 0
+#endif
/* ... */
/* Page is broken? */
#define _PGC_broken PG_shift(7)
@@ -1496,7 +1496,10 @@ void put_page(struct page_info *page)
if ( unlikely((nx & PGC_count_mask) == 0) )
{
- free_domheap_page(page);
+ if ( unlikely(nx & PGC_static) )
+ free_domstatic_page(page);
+ else
+ free_domheap_page(page);
}
}
@@ -2694,12 +2694,14 @@ struct domain *get_pg_owner(domid_t domid)
#ifdef CONFIG_STATIC_MEMORY
/* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
- bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+ bool need_scrub)
{
mfn_t mfn = page_to_mfn(pg);
unsigned long i;
+ spin_lock(&heap_lock);
+
for ( i = 0; i < nr_mfns; i++ )
{
mark_page_free(&pg[i], mfn_add(mfn, i));
@@ -2710,9 +2712,41 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
-            scrub_one_page(pg);
+            scrub_one_page(&pg[i]);
}
- /* In case initializing page of static memory, mark it PGC_static. */
pg[i].count_info |= PGC_static;
}
+
+ spin_unlock(&heap_lock);
+}
+
+void free_domstatic_page(struct page_info *page)
+{
+ struct domain *d = page_get_owner(page);
+ bool drop_dom_ref;
+
+ if ( unlikely(!d) )
+ {
+ printk(XENLOG_G_ERR
+ "The about-to-free static page %"PRI_mfn" must be owned by a domain\n",
+ mfn_x(page_to_mfn(page)));
+ ASSERT_UNREACHABLE();
+ return;
+ }
+
+ ASSERT_ALLOC_CONTEXT();
+
+ /* NB. May recursively lock from relinquish_memory(). */
+ spin_lock_recursive(&d->page_alloc_lock);
+
+ arch_free_heap_page(d, page);
+
+ drop_dom_ref = !domain_adjust_tot_pages(d, -1);
+
+ spin_unlock_recursive(&d->page_alloc_lock);
+
+ free_staticmem_pages(page, 1, scrub_debug);
+
+ if ( drop_dom_ref )
+ put_domain(d);
}
/*
@@ -85,13 +85,12 @@ bool scrub_free_pages(void);
} while ( false )
#define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
-#ifdef CONFIG_STATIC_MEMORY
/* These functions are for static memory */
void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
bool need_scrub);
+void free_domstatic_page(struct page_info *page);
int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
unsigned int memflags);
-#endif
/* Map machine page range in Xen virtual address space. */
int map_pages_to_xen(