@@ -45,13 +45,16 @@ void idle_loop(void)
if ( cpu_is_offline(smp_processor_id()) )
stop_cpu();
- local_irq_disable();
- if ( cpu_is_haltable(smp_processor_id()) )
+ if ( !scrub_free_pages() )
{
- dsb(sy);
- wfi();
+ local_irq_disable();
+ if ( cpu_is_haltable(smp_processor_id()) )
+ {
+ dsb(sy);
+ wfi();
+ }
+ local_irq_enable();
}
- local_irq_enable();
do_tasklet();
do_softirq();
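
The idle loop now offers to scrub before halting: scrub_free_pages() returns true while scrub work (or a pending softirq) remains, in which case the wfi() halt is skipped and the loop services tasklets and softirqs before trying again. The x86 idle_loop() below gets the same gating around (*pm_idle)(). A minimal standalone sketch of that contract, using hypothetical stand-in names and omitting the local_irq_disable()/enable() race protection the real code keeps:

    #include <stdbool.h>

    /* Hypothetical stand-ins, not Xen APIs. */
    static bool scrub_free_pages_sketch(void) { return false; } /* true => more work */
    static bool cpu_is_haltable_sketch(void)  { return true; }
    static void halt_until_interrupt(void)    { }  /* wfi()/(*pm_idle)() */
    static void run_pending_work(void)        { }  /* do_tasklet() + do_softirq() */

    static void idle_loop_sketch(void)
    {
        for ( ;; )
        {
            /* Halt only when the scrubber reports nothing left to do. */
            if ( !scrub_free_pages_sketch() && cpu_is_haltable_sketch() )
                halt_until_interrupt();
            /* Always drain pending work before the next halt attempt. */
            run_pending_work();
        }
    }
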
@@ -118,7 +118,8 @@ static void idle_loop(void)
{
if ( cpu_is_offline(smp_processor_id()) )
play_dead();
- (*pm_idle)();
+ if ( !scrub_free_pages() )
+ (*pm_idle)();
do_tasklet();
do_softirq();
/*
@@ -1043,16 +1043,44 @@ static struct page_info *merge_chunks(struct page_info *pg, unsigned int node,
return pg;
}
-static void scrub_free_pages(unsigned int node)
+static nodemask_t node_scrubbing;
+static unsigned int node_to_scrub(bool_t get_node)
+{
+ nodeid_t node = cpu_to_node(smp_processor_id()), local_node;
+
+ if ( node == NUMA_NO_NODE )
+ node = 0;
+ local_node = node;
+
+ /*
+ * Check the local node first and then see if there are any memory-only
+ * nodes that may need scrubbing.
+ */
+ while ( 1 )
+ {
+ if ( node_need_scrub[node] &&
+ (!get_node || !node_test_and_set(node, node_scrubbing)) )
+ return node;
+ do {
+ node = cycle_node(node, node_online_map);
+ if ( node == local_node )
+ return NUMA_NO_NODE;
+ } while ( !cpumask_empty(&node_to_cpumask(node)) );
+ }
+}
+
+bool_t scrub_free_pages(void)
{
struct page_info *pg;
unsigned int i, zone;
- int order;
+ int order, cpu = smp_processor_id();
+ nodeid_t node;
- ASSERT(spin_is_locked(&heap_lock));
+ node = node_to_scrub(true);
+ if ( node == NUMA_NO_NODE )
+ return false;
- if ( !node_need_scrub[node] )
- return;
+ spin_lock(&heap_lock);
for ( zone = 0; zone < NR_ZONES; zone++ )
{
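
node_to_scrub() checks the caller's local node first and then cycles through node_online_map looking for memory-only nodes (those with an empty CPU mask), which no CPU would otherwise ever idle on and scrub. The node_scrubbing nodemask makes the choice exclusive: node_test_and_set() lets only one CPU claim a node at a time, while get_node == false merely probes for remaining work without claiming. A standalone illustration of that claim/release idiom, using C11 atomics in place of Xen's nodemask primitives (all names here are hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for the node_scrubbing nodemask. */
    static atomic_ulong scrub_claims;

    /* Like node_test_and_set(): true means we won the claim. */
    static bool claim_node(unsigned int node)
    {
        unsigned long bit = 1UL << node;

        /* fetch_or returns the old mask; if our bit was clear, we own it. */
        return !(atomic_fetch_or(&scrub_claims, bit) & bit);
    }

    /* Like node_clear(): release the claim once scrubbing stops. */
    static void release_node(unsigned int node)
    {
        atomic_fetch_and(&scrub_claims, ~(1UL << node));
    }
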
@@ -1066,7 +1094,11 @@ static void scrub_free_pages(unsigned int node)
break;
for ( i = 0; i < (1UL << order); i++)
+ {
scrub_one_page(&pg[i]);
+ if ( softirq_pending(cpu) )
+ goto out;
+ }
pg->count_info &= ~PGC_need_scrub;
@@ -1077,7 +1109,12 @@ static void scrub_free_pages(unsigned int node)
}
}
}
- }
+
+ out:
+ spin_unlock(&heap_lock);
+ node_clear(node, node_scrubbing);
+ return softirq_pending(cpu) || (node_to_scrub(false) != NUMA_NO_NODE);
+}
/* Free 2^@order set of pages. */
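
The softirq_pending() check after every scrub_one_page() bounds the latency the scrubber can add: on a pending softirq it drops the heap lock, releases its node claim, and returns true so the idle loop runs softirqs and comes back. Since PGC_need_scrub is only cleared once a whole 2^order chunk is done, a chunk interrupted mid-scrub stays flagged and its already-scrubbed pages will be scrubbed again on the next pass. A standalone sketch of this check-after-each-unit preemption pattern (hypothetical names):

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for softirq_pending()/scrub_one_page(). */
    static bool preempt_requested(void) { return false; }
    static void scrub_unit(size_t i)    { (void)i; }

    /* True only if the whole chunk was scrubbed; false means the caller
     * must leave the chunk flagged and retry it from the start later. */
    static bool scrub_chunk(size_t npages)
    {
        for ( size_t i = 0; i < npages; i++ )
        {
            scrub_unit(i);
            if ( preempt_requested() )  /* bail out with bounded latency */
                return false;
        }
        return true;
    }
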
@@ -1142,9 +1179,6 @@ static void free_heap_pages(
if ( tainted )
reserve_offlined_page(pg);
- if ( need_scrub )
- scrub_free_pages(node);
-
spin_unlock(&heap_lock);
}
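
With the synchronous scrub_free_pages() call gone, free_heap_pages() only marks dirty chunks (PGC_need_scrub plus the node_need_scrub[] count) and returns immediately; all actual scrubbing is deferred to idle CPUs via the loop above. The free path gets shorter, at the cost that freed memory may stay dirty until some CPU idles on, or claims, the node holding it.
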
@@ -138,6 +138,7 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe);
void xenheap_max_mfn(unsigned long mfn);
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
void free_xenheap_pages(void *v, unsigned int order);
+bool_t scrub_free_pages(void);
#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
#define free_xenheap_page(v) (free_xenheap_pages(v,0))
/* Map machine page range in Xen virtual address space. */
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v2:
* Added node_to_scrub()
* Include softirq_pending() in scrub_free_pages()'s return value

 xen/arch/arm/domain.c   | 13 +++++++----
 xen/arch/x86/domain.c   |  3 +-
 xen/common/page_alloc.c | 52 ++++++++++++++++++++++++++++++++++++++--------
 xen/include/xen/mm.h    |  1 +
 4 files changed, 54 insertions(+), 15 deletions(-)