Patchwork x86: do not free zero-sized per-CPU areas

login
register
mail settings
Submitter Ian Campbell
Date March 5, 2010, 7:49 p.m.
Message ID <1267818583-5627-1-git-send-email-ian.campbell@citrix.com>
Download mbox | patch
Permalink /patch/83832/
State New, archived
Headers show

Comments

Patch

diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef6370b..89a3205 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -140,7 +140,8 @@  static void __init pcpu_fc_free(void *ptr, size_t size)
 #ifdef CONFIG_NO_BOOTMEM
 	u64 start = __pa(ptr);
 	u64 end = start + size;
-	free_early_partial(start, end);
+	if (start < end)
+		free_early_partial(start, end);
 #else
 	free_bootmem(__pa(ptr), size);
 #endif
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 3cb2c66..f3a861b 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -333,6 +333,11 @@  void __init free_early_partial(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	if (WARN_ONCE(start >= end,
+		      "free_early_partial got wrong start/end %#llx/%#llx\n",
+		      start, end))
+		return;
+
 try_next:
 	i = find_overlapped_early(start, end);
 	if (i >= max_early_res)