@@ -1481,32 +1481,6 @@ void clear_zone_contiguous(struct zone *zone)
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-static void __init deferred_free_range(unsigned long pfn,
-				       unsigned long nr_pages)
-{
-	struct page *page;
-	unsigned long i;
-
-	if (!nr_pages)
-		return;
-
-	page = pfn_to_page(pfn);
-
-	/* Free a large naturally-aligned chunk if possible */
-	if (nr_pages == pageblock_nr_pages &&
-	    (pfn & (pageblock_nr_pages - 1)) == 0) {
-		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_core(page, pageblock_order);
-		return;
-	}
-
-	for (i = 0; i < nr_pages; i++, page++, pfn++) {
-		if ((pfn & (pageblock_nr_pages - 1)) == 0)
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_core(page, 0);
-	}
-}
-
/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
@@ -1518,48 +1492,89 @@ static inline void __init pgdat_init_report_one_done(void)
}
/*
- * Returns true if page needs to be initialized or freed to buddy allocator.
+ * Returns the number of pages in the next contiguous range of valid pfns
+ * that need to be initialized or freed to the buddy allocator, or zero
+ * when no valid pfns remain below end_pfn.
  *
- * First we check if pfn is valid on architectures where it is possible to have
- * holes within pageblock_nr_pages. On systems where it is not possible, this
- * function is optimized out.
+ * On architectures where it is possible to have holes within
+ * pageblock_nr_pages, every pfn is checked individually and a range is cut
+ * short at the first hole.
  *
- * Then, we check if a current large page is valid by only checking the validity
- * of the head pfn.
+ * On systems where such holes cannot exist, a pageblock is validated by
+ * checking only the validity of its first pfn, so the per-pfn walk is
+ * optimized out.
  */
-static inline bool __init deferred_pfn_valid(unsigned long pfn)
+static unsigned long __next_pfn_valid_range(unsigned long *pfn,
+					    unsigned long *i,
+					    unsigned long end_pfn)
{
-	if (!pfn_valid_within(pfn))
-		return false;
-	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
-		return false;
-	return true;
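+	/* resume the scan where the previous call left off */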
+	unsigned long start_pfn = *i;
+
+	while (start_pfn < end_pfn) {
+		unsigned long t = ALIGN(start_pfn + 1, pageblock_nr_pages);
+		unsigned long pageblock_pfn = min(t, end_pfn);
+		unsigned long count = 0;
+
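+		/*
+		 * Without holes inside the pageblock, checking the first
+		 * pfn is enough to validate pfns up to pageblock_pfn.
+		 */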
+#ifndef CONFIG_HOLES_IN_ZONE
+		if (pfn_valid(start_pfn))
+			count = pageblock_pfn - start_pfn;
+		start_pfn = pageblock_pfn;
+#else
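+		/*
+		 * Holes are possible: walk the pageblock pfn by pfn,
+		 * accumulating a run of valid pfns and stopping at the
+		 * first hole found after the run has started.
+		 */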
+		while (start_pfn < pageblock_pfn) {
+			if (pfn_valid(start_pfn++)) {
+				count++;
+				continue;
+			}
+
+			if (!count)
+				continue;
+
+			/*
+			 * The last pfn was invalid, report the run of
+			 * valid pfns we currently have available and
+			 * skip over the invalid one.
+			 */
+			*pfn = start_pfn - (count + 1);
+			*i = start_pfn;
+			return count;
+		}
+#endif
+		/* no usable pfns in this pageblock, try the next one */
+		if (!count)
+			continue;
+
+		/* the run reached the end of the pageblock or of the range */
+		*pfn = start_pfn - count;
+		*i = start_pfn;
+		return count;
+	}
+
+	return 0;
}
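+
+/*
+ * Iterate over the contiguous ranges of valid pfns within [start_pfn,
+ * end_pfn). Each iteration sets @pfn to the first pfn of a range and
+ * @count to the number of pfns in it; @i is the iteration cursor and must
+ * not be modified by the loop body. A single range never crosses a
+ * pageblock boundary.
+ */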
+#define for_each_deferred_pfn_valid_range(pfn, count, i, start_pfn, end_pfn) \
+	for (i = (start_pfn),						\
+	     count = __next_pfn_valid_range(&pfn, &i, (end_pfn));	\
+	     count;							\
+	     count = __next_pfn_valid_range(&pfn, &i, (end_pfn)))
+
/*
* Free pages to buddy allocator. Try to free aligned pages in
* pageblock_nr_pages sizes.
*/
-static void __init deferred_free_pages(unsigned long pfn,
+static void __init deferred_free_pages(unsigned long start_pfn,
 				       unsigned long end_pfn)
{
-	unsigned long nr_pgmask = pageblock_nr_pages - 1;
-	unsigned long nr_free = 0;
-
-	for (; pfn < end_pfn; pfn++) {
-		if (!deferred_pfn_valid(pfn)) {
-			deferred_free_range(pfn - nr_free, nr_free);
-			nr_free = 0;
-		} else if (!(pfn & nr_pgmask)) {
-			deferred_free_range(pfn - nr_free, nr_free);
-			nr_free = 1;
-			touch_nmi_watchdog();
+	unsigned long i, pfn, count;
+
+	for_each_deferred_pfn_valid_range(pfn, count, i, start_pfn, end_pfn) {
+		struct page *page = pfn_to_page(pfn);
+
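+		/* free a whole naturally aligned pageblock in one go */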
+		if (count == pageblock_nr_pages) {
+			__free_pages_core(page, pageblock_order);
 		} else {
-			nr_free++;
+			/* fall back to freeing individual order-0 pages */
+			while (count--)
+				__free_pages_core(page++, 0);
 		}
+
+		touch_nmi_watchdog();
 	}
-	/* Free the last block of pages to allocator */
-	deferred_free_range(pfn - nr_free, nr_free);
}
/*
@@ -1568,29 +1583,22 @@ static void __init deferred_free_pages(unsigned long pfn,
* Return number of pages initialized.
*/
static unsigned long __init deferred_init_pages(struct zone *zone,
-						 unsigned long pfn,
+						 unsigned long start_pfn,
 						 unsigned long end_pfn)
{
-	unsigned long nr_pgmask = pageblock_nr_pages - 1;
 	int nid = zone_to_nid(zone);
+	unsigned long i, pfn, count;
 	unsigned long nr_pages = 0;
 	int zid = zone_idx(zone);
-	struct page *page = NULL;
-	for (; pfn < end_pfn; pfn++) {
-		if (!deferred_pfn_valid(pfn)) {
-			page = NULL;
-			continue;
-		} else if (!page || !(pfn & nr_pgmask)) {
-			page = pfn_to_page(pfn);
-			touch_nmi_watchdog();
-		} else {
-			page++;
-		}
-		__init_single_page(page, pfn, zid, nid);
-		nr_pages++;
+	for_each_deferred_pfn_valid_range(pfn, count, i, start_pfn, end_pfn) {
+		nr_pages += count;
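+		/* initialize the whole run of struct pages in one call */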
+		__init_pageblock(pfn, count, zid, nid, NULL, false);
+
+		touch_nmi_watchdog();
 	}
-	return (nr_pages);
+
+	return nr_pages;
}
/*