
Linux 4.4 boot crash on xen 4.5.3 with dom0_mem == max

Message ID 573C55AC.8020003@suse.com (mailing list archive)
State New, archived

Commit Message

Jürgen Groß May 18, 2016, 11:44 a.m. UTC
On 17/05/16 22:50, Ed Swierk wrote:
> I added some more instrumentation and discovered that the result of
> xen_count_remap_pages() (0x85dea) is one less than the actual number
> of pages remapped by xen_set_identity_and_remap() (0x85deb).
> 
> The two functions differ in their handling of a xen_e820_map entry
> whose size is not a multiple of the page size.  The entry starting at
> 0x68000 has size 0x33400.  xen_count_remap_pages() rounds up when
> computing the end_pfn (to 0x9c), while xen_set_identity_and_remap()
> rounds down (to 0x9b).  Thus xen_count_remap_pages() counts the
> remapped space following the entry as one page smaller than the other
> function does.
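
To make the mismatch concrete: the entry ends at 0x68000 + 0x33400 = 0x9b400, which is not page aligned, so rounding up and rounding down land on different frame numbers. A minimal user-space sketch of the arithmetic (PFN_UP/PFN_DOWN redefined locally, assuming the usual PAGE_SHIFT of 12; the values are the ones quoted above):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x68000UL;
	unsigned long size = 0x33400UL;
	unsigned long end = addr + size;	/* 0x9b400, not page aligned */

	/* xen_count_remap_pages() rounds the end of the entry up ... */
	printf("PFN_UP(end)   = %#lx\n", PFN_UP(end));		/* 0x9c */
	/* ... while xen_set_identity_and_remap() rounds it down. */
	printf("PFN_DOWN(end) = %#lx\n", PFN_DOWN(end));	/* 0x9b */

	return 0;
}

Rounding up yields 0x9c while rounding down yields 0x9b, so the count comes out one page short of what actually gets remapped, matching the 0x85dea vs. 0x85deb difference above.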

Could you please test the attached patch? It follows your idea of the
combined function, but is using a function pointer instead of a flag.
The patch is based on 4.6, but I believe it should just work on 4.4.


Juergen
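
The idea in one minimal user-space sketch (all names here, for_each_region/count_fn/remap_fn, are illustrative stand-ins rather than identifiers from the patch below): a single iterator derives each region's boundaries once and hands them to a caller-supplied callback, so the counting pass and the remapping pass can no longer disagree about where a region ends.

#include <stdio.h>

struct region {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

typedef unsigned long (*region_fn)(unsigned long start_pfn,
				   unsigned long end_pfn,
				   unsigned long acc);

/* One loop computes the region boundaries and hands them to the callback. */
static unsigned long for_each_region(const struct region *map, int entries,
				     region_fn fn)
{
	unsigned long acc = 0;
	int i;

	for (i = 0; i < entries; i++)
		acc = fn(map[i].start_pfn, map[i].end_pfn, acc);

	return acc;
}

/* Counting pass: just accumulate the number of page frames. */
static unsigned long count_fn(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long acc)
{
	return acc + (end_pfn - start_pfn);
}

/* "Remap" pass: acts on exactly the same boundaries the counting pass saw. */
static unsigned long remap_fn(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long acc)
{
	printf("remapping pfns %#lx-%#lx\n", start_pfn, end_pfn);
	return acc + (end_pfn - start_pfn);
}

int main(void)
{
	struct region map[] = { { 0x68, 0x9c }, { 0x100, 0x140 } };
	int entries = sizeof(map) / sizeof(map[0]);

	unsigned long counted  = for_each_region(map, entries, count_fn);
	unsigned long remapped = for_each_region(map, entries, remap_fn);

	/* Both passes used the same loop, so the results cannot diverge. */
	printf("counted %#lx, remapped %#lx\n", counted, remapped);
	return 0;
}

In the patch itself the iterator is xen_foreach_remap_area(). It seeds the accumulator with 0 for both callbacks, and xen_set_identity_and_remap_chunk() converts that initial 0 into nr_pages, which is the starting remap target the removed wrapper used to pass in as last_pfn.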

Comments

Ed Swierk May 18, 2016, 2:39 p.m. UTC | #1
Nice implementation. I tested it and it fixes the problem on the affected
system.

Just a minor typo in a comment: "it's duty" should be "its duty".

--Ed


Jürgen Groß May 18, 2016, 2:45 p.m. UTC | #2
On 18/05/16 16:39, Ed Swierk wrote:
> Nice implementation. I tested it and it fixes the problem on the
> affected system.
> 
> Just a minor typo in a comment: "it's duty" should be "its duty".

Thanks. Corrected and sent to lkml.


Juergen

Patch

From 0d665e37ef79c35cc71830a46d3cf1746b5d01ee Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross@suse.com>
Date: Wed, 18 May 2016 12:54:42 +0200
Subject: [PATCH] xen: use same main loop for counting and remapping pages

Instead of having two functions for cycling through the E820 map, one
to count the pages to be remapped and one to remap them later, just
use a single function with a caller-supplied sub-function called for
each region to be processed. This eliminates the possibility of a
mismatch between the two loops, which showed up in certain
configurations.

Suggested-by: Ed Swierk <eswierk@skyportsystems.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
---
 arch/x86/xen/setup.c | 65 +++++++++++++++++++++-------------------------------
 1 file changed, 26 insertions(+), 39 deletions(-)

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 7ab2951..42b8fda 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -393,6 +393,9 @@  static unsigned long __init xen_set_identity_and_remap_chunk(
 	unsigned long i = 0;
 	unsigned long n = end_pfn - start_pfn;
 
+	if (remap_pfn == 0)
+		remap_pfn = nr_pages;
+
 	while (i < n) {
 		unsigned long cur_pfn = start_pfn + i;
 		unsigned long left = n - i;
@@ -438,17 +441,29 @@  static unsigned long __init xen_set_identity_and_remap_chunk(
 	return remap_pfn;
 }
 
-static void __init xen_set_identity_and_remap(unsigned long nr_pages)
+static unsigned long __init xen_count_remap_pages(
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long remap_pages)
+{
+	if (start_pfn >= nr_pages)
+		return remap_pages;
+
+	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+}
+
+static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
+			      unsigned long nr_pages, unsigned long last_val))
 {
 	phys_addr_t start = 0;
-	unsigned long last_pfn = nr_pages;
+	unsigned long ret_val = 0;
 	const struct e820entry *entry = xen_e820_map;
 	int i;
 
 	/*
 	 * Combine non-RAM regions and gaps until a RAM region (or the
-	 * end of the map) is reached, then set the 1:1 map and
-	 * remap the memory in those non-RAM regions.
+	 * end of the map) is reached, then call the provided function
+	 * to perform it's duty on the non-RAM region.
 	 *
 	 * The combined non-RAM regions are rounded to a whole number
 	 * of pages so any partial pages are accessible via the 1:1
@@ -466,14 +481,13 @@  static void __init xen_set_identity_and_remap(unsigned long nr_pages)
 				end_pfn = PFN_UP(entry->addr);
 
 			if (start_pfn < end_pfn)
-				last_pfn = xen_set_identity_and_remap_chunk(
-						start_pfn, end_pfn, nr_pages,
-						last_pfn);
+				ret_val = func(start_pfn, end_pfn, nr_pages,
+					       ret_val);
 			start = end;
 		}
 	}
 
-	pr_info("Released %ld page(s)\n", xen_released_pages);
+	return ret_val;
 }
 
 /*
@@ -596,35 +610,6 @@  static void __init xen_ignore_unusable(void)
 	}
 }
 
-static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
-{
-	unsigned long extra = 0;
-	unsigned long start_pfn, end_pfn;
-	const struct e820entry *entry = xen_e820_map;
-	int i;
-
-	end_pfn = 0;
-	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
-		start_pfn = PFN_DOWN(entry->addr);
-		/* Adjacent regions on non-page boundaries handling! */
-		end_pfn = min(end_pfn, start_pfn);
-
-		if (start_pfn >= max_pfn)
-			return extra + max_pfn - end_pfn;
-
-		/* Add any holes in map to result. */
-		extra += start_pfn - end_pfn;
-
-		end_pfn = PFN_UP(entry->addr + entry->size);
-		end_pfn = min(end_pfn, max_pfn);
-
-		if (entry->type != E820_RAM)
-			extra += end_pfn - start_pfn;
-	}
-
-	return extra;
-}
-
 bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
 {
 	struct e820entry *entry;
@@ -804,7 +789,7 @@  char * __init xen_memory_setup(void)
 	max_pages = xen_get_max_pages();
 
 	/* How many extra pages do we need due to remapping? */
-	max_pages += xen_count_remap_pages(max_pfn);
+	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
 
 	if (max_pages > max_pfn)
 		extra_pages += max_pages - max_pfn;
@@ -922,7 +907,9 @@  char * __init xen_memory_setup(void)
 	 * Set identity map on non-RAM pages and prepare remapping the
 	 * underlying RAM.
 	 */
-	xen_set_identity_and_remap(max_pfn);
+	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+
+	pr_info("Released %ld page(s)\n", xen_released_pages);
 
 	return "Xen";
 }
-- 
2.6.6