Revert "mm: Always release pages to the buddy allocator in memblock_free_late()."

Message ID 20230207082151.1303-1-dev@aaront.org (mailing list archive)
State New
Series Revert "mm: Always release pages to the buddy allocator in memblock_free_late()."

Commit Message

Aaron Thompson Feb. 7, 2023, 8:21 a.m. UTC
This reverts commit 115d9d77bb0f9152c60b6e8646369fa7f6167593.

The pages being freed by memblock_free_late() have already been
initialized, but if they are in the deferred init range,
__free_one_page() might access nearby uninitialized pages when trying to
coalesce buddies. This can, for example, trigger this BUG:

  BUG: unable to handle page fault for address: ffffe964c02580c8
  RIP: 0010:__list_del_entry_valid+0x3f/0x70
   <TASK>
   __free_one_page+0x139/0x410
   __free_pages_ok+0x21d/0x450
   memblock_free_late+0x8c/0xb9
   efi_free_boot_services+0x16b/0x25c
   efi_enter_virtual_mode+0x403/0x446
   start_kernel+0x678/0x714
   secondary_startup_64_no_verify+0xd2/0xdb
   </TASK>

A proper fix will be more involved, so revert this change for the time
being.

Fixes: 115d9d77bb0f ("mm: Always release pages to the buddy allocator in memblock_free_late().")
Signed-off-by: Aaron Thompson <dev@aaront.org>
---
 mm/memblock.c                     | 8 +-------
 tools/testing/memblock/internal.h | 4 ----
 2 files changed, 1 insertion(+), 11 deletions(-)
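
For readers skimming the archive, here is a hedged illustration of the failure mode the commit message describes. It is a plain userspace C model, not kernel code; every name in it (deferred_start_pfn, reserved_pfn, the free_* helpers) and the pfn values are invented for the example. It models two things: order-0 coalescing derives the buddy as pfn ^ 1 and inspects that neighbour's struct page, which is the kind of access that oopses when the neighbour's memmap entry is still uninitialized; and the memblock_free_pages() path the revert returns to sidesteps that by skipping pfns in the deferred-init range (at the cost of not releasing them here, which is what the reverted commit had tried to fix).

/*
 * Illustrative userspace model, not kernel code. All names and the
 * deferred-range boundary below are made up for this example.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical boundary: struct pages at or above this pfn are only set
 * up later by deferred struct-page initialization. */
static const unsigned long deferred_start_pfn = 0x1000;

/* A reserved pfn inside the deferred range whose own struct page was
 * initialized early (the situation memblock_free_late() relies on). */
static const unsigned long reserved_pfn = 0x1234;

static bool in_deferred_range(unsigned long pfn)
{
	return pfn >= deferred_start_pfn;
}

static bool struct_page_initialised(unsigned long pfn)
{
	return !in_deferred_range(pfn) || pfn == reserved_pfn;
}

static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
{
	/* The buddy of a 2^order block differs only in pfn bit 'order'. */
	return pfn ^ (1UL << order);
}

/* Model of freeing directly to the buddy allocator: coalescing has to
 * inspect the buddy's metadata, whether it was initialized or not. */
static void free_direct(unsigned long pfn, unsigned int order)
{
	unsigned long buddy = buddy_pfn(pfn, order);

	printf("free pfn %#lx: coalescing reads struct page of buddy %#lx%s\n",
	       pfn, buddy,
	       struct_page_initialised(buddy) ? "" : " (uninitialized -> oops)");
}

/* Model of the path the revert restores: skip pfns in the deferred
 * range and leave them for deferred init to release later. */
static void free_checked(unsigned long pfn, unsigned int order)
{
	if (in_deferred_range(pfn)) {
		printf("free pfn %#lx: in deferred range, not released now\n", pfn);
		return;
	}
	free_direct(pfn, order);
}

int main(void)
{
	free_direct(reserved_pfn, 0);	/* roughly what 115d9d77bb0f did */
	free_checked(reserved_pfn, 0);	/* roughly what the revert restores */
	return 0;
}

Compiled as ordinary C, the first call prints the buddy access that would hit uninitialized memmap; the second shows the pfn simply being skipped instead.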

Comments

Mike Rapoport Feb. 7, 2023, 11:08 a.m. UTC | #1
On Tue, Feb 07, 2023 at 08:21:51AM +0000, Aaron Thompson wrote:
> This reverts commit 115d9d77bb0f9152c60b6e8646369fa7f6167593.
> 
> The pages being freed by memblock_free_late() have already been
> initialized, but if they are in the deferred init range,
> __free_one_page() might access nearby uninitialized pages when trying to
> coalesce buddies. This can, for example, trigger this BUG:
> 
>   BUG: unable to handle page fault for address: ffffe964c02580c8
>   RIP: 0010:__list_del_entry_valid+0x3f/0x70
>    <TASK>
>    __free_one_page+0x139/0x410
>    __free_pages_ok+0x21d/0x450
>    memblock_free_late+0x8c/0xb9
>    efi_free_boot_services+0x16b/0x25c
>    efi_enter_virtual_mode+0x403/0x446
>    start_kernel+0x678/0x714
>    secondary_startup_64_no_verify+0xd2/0xdb
>    </TASK>
> 
> A proper fix will be more involved, so revert this change for the time
> being.
> 
> Fixes: 115d9d77bb0f ("mm: Always release pages to the buddy allocator in memblock_free_late().")
> Signed-off-by: Aaron Thompson <dev@aaront.org>
> ---
>  mm/memblock.c                     | 8 +-------
>  tools/testing/memblock/internal.h | 4 ----
>  2 files changed, 1 insertion(+), 11 deletions(-)

Applied, thanks!
 

Patch

diff --git a/mm/memblock.c b/mm/memblock.c
index 685e30e6d27c..d036c7861310 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1640,13 +1640,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
 	end = PFN_DOWN(base + size);
 
 	for (; cursor < end; cursor++) {
-		/*
-		 * Reserved pages are always initialized by the end of
-		 * memblock_free_all() (by memmap_init() and, if deferred
-		 * initialization is enabled, memmap_init_reserved_pages()), so
-		 * these pages can be released directly to the buddy allocator.
-		 */
-		__free_pages_core(pfn_to_page(cursor), 0);
+		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
 		totalram_pages_inc();
 	}
 }
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 85973e55489e..fdb7f5db7308 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -15,10 +15,6 @@ bool mirrored_kernelcore = false;
 
 struct page {};
 
-void __free_pages_core(struct page *page, unsigned int order)
-{
-}
-
 void memblock_free_pages(struct page *page, unsigned long pfn,
 			 unsigned int order)
 {