[3/3] mm/memblock: Modify the default failure behavior of memblock_alloc_low(from)

Message ID 20250103105158.1350689-3-guoweikang.kernel@gmail.com (mailing list archive)
State New
Series [1/3] mm/memblock: Modify the default failure behavior of memblock_alloc to panic

Commit Message

Weikang Guo Jan. 3, 2025, 10:51 a.m. UTC
Just like memblock_alloc(), memblock_alloc_low() and memblock_alloc_from()
now panic by default when an allocation fails, so callers no longer need to
open-code the failure check. Callers that want to handle the failure
themselves can use the new memblock_alloc_low_no_panic() and
memblock_alloc_from_no_panic() variants instead.
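
For illustration, a minimal before/after sketch of the caller-side effect
(a condensed view based on the arch/arc/mm/highmem.c hunk below, not new
code):

	/* before: every caller open-codes the failure check */
	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pte_k)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/* after: memblock_alloc_low() panics internally on failure */
	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);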

Signed-off-by: Guo Weikang <guoweikang.kernel@gmail.com>
---
 arch/arc/mm/highmem.c       |  4 ----
 arch/csky/mm/init.c         |  5 ----
 arch/m68k/atari/stram.c     |  4 ----
 arch/m68k/mm/motorola.c     |  9 -------
 arch/mips/include/asm/dmi.h |  2 +-
 arch/mips/mm/init.c         |  5 ----
 arch/s390/kernel/setup.c    |  4 ----
 arch/s390/kernel/smp.c      |  3 ---
 arch/sparc/mm/init_64.c     | 13 ----------
 arch/um/kernel/mem.c        | 20 ----------------
 arch/xtensa/mm/mmu.c        |  4 ----
 include/linux/memblock.h    | 30 ++++++++++++-----------
 mm/memblock.c               | 47 +++++++++++++++++++++++++++++++++++++
 mm/percpu.c                 |  6 ++---
 14 files changed, 67 insertions(+), 89 deletions(-)

Comments

kernel test robot Jan. 4, 2025, 8:38 a.m. UTC | #1
Hi Guo,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]

url:    https://github.com/intel-lab-lkp/linux/commits/Guo-Weikang/mm-memblock-Modify-the-default-failure-behavior-of-memblock_alloc_raw-to-panic/20250103-185401
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20250103105158.1350689-3-guoweikang.kernel%40gmail.com
patch subject: [PATCH 3/3] mm/memblock: Modify the default failure behavior of memblock_alloc_low(from)
config: sparc-randconfig-001-20250104 (https://download.01.org/0day-ci/archive/20250104/202501041603.0c8v8Wbr-lkp@intel.com/config)
compiler: sparc64-linux-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250104/202501041603.0c8v8Wbr-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501041603.0c8v8Wbr-lkp@intel.com/

All warnings (new ones prefixed by >>):

   arch/sparc/mm/init_64.c: In function 'arch_hugetlb_valid_size':
   arch/sparc/mm/init_64.c:361:24: warning: variable 'hv_pgsz_idx' set but not used [-Wunused-but-set-variable]
     361 |         unsigned short hv_pgsz_idx;
         |                        ^~~~~~~~~~~
   arch/sparc/mm/init_64.c: In function 'kernel_map_range':
>> arch/sparc/mm/init_64.c:1788:32: warning: variable 'new' set but not used [-Wunused-but-set-variable]
    1788 |                         pud_t *new;
         |                                ^~~
   arch/sparc/mm/init_64.c: In function 'sun4v_linear_pte_xor_finalize':
   arch/sparc/mm/init_64.c:2200:23: warning: variable 'pagecv_flag' set but not used [-Wunused-but-set-variable]
    2200 |         unsigned long pagecv_flag;
         |                       ^~~~~~~~~~~


vim +/new +1788 arch/sparc/mm/init_64.c

0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1764  
896aef430e5afb arch/sparc64/mm/init.c  Sam Ravnborg    2008-02-24  1765  static unsigned long __ref kernel_map_range(unsigned long pstart,
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1766  					    unsigned long pend, pgprot_t prot,
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1767  					    bool use_huge)
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1768  {
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1769  	unsigned long vstart = PAGE_OFFSET + pstart;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1770  	unsigned long vend = PAGE_OFFSET + pend;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1771  	unsigned long alloc_bytes = 0UL;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1772  
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1773  	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
13edad7a5cef1c arch/sparc64/mm/init.c  David S. Miller 2005-09-29  1774  		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1775  			    vstart, vend);
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1776  		prom_halt();
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1777  	}
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1778  
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1779  	while (vstart < vend) {
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1780  		unsigned long this_end, paddr = __pa(vstart);
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1781  		pgd_t *pgd = pgd_offset_k(vstart);
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1782  		p4d_t *p4d;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1783  		pud_t *pud;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1784  		pmd_t *pmd;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1785  		pte_t *pte;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1786  
ac55c768143aa3 arch/sparc/mm/init_64.c David S. Miller 2014-09-26  1787  		if (pgd_none(*pgd)) {
ac55c768143aa3 arch/sparc/mm/init_64.c David S. Miller 2014-09-26 @1788  			pud_t *new;
ac55c768143aa3 arch/sparc/mm/init_64.c David S. Miller 2014-09-26  1789  
4fc4a09e4cc112 arch/sparc/mm/init_64.c Mike Rapoport   2018-10-30  1790  			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
4fc4a09e4cc112 arch/sparc/mm/init_64.c Mike Rapoport   2018-10-30  1791  						  PAGE_SIZE);
ac55c768143aa3 arch/sparc/mm/init_64.c David S. Miller 2014-09-26  1792  			alloc_bytes += PAGE_SIZE;
ac55c768143aa3 arch/sparc/mm/init_64.c David S. Miller 2014-09-26  1793  			pgd_populate(&init_mm, pgd, new);
ac55c768143aa3 arch/sparc/mm/init_64.c David S. Miller 2014-09-26  1794  		}
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1795  
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1796  		p4d = p4d_offset(pgd, vstart);
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1797  		if (p4d_none(*p4d)) {
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1798  			pud_t *new;
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1799  
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1800  			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1801  						  PAGE_SIZE);
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1802  			alloc_bytes += PAGE_SIZE;
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1803  			p4d_populate(&init_mm, p4d, new);
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1804  		}
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1805  
5637bc50483404 arch/sparc/mm/init_64.c Mike Rapoport   2019-11-24  1806  		pud = pud_offset(p4d, vstart);
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1807  		if (pud_none(*pud)) {
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1808  			pmd_t *new;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1809  
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1810  			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1811  				vstart = kernel_map_hugepud(vstart, vend, pud);
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1812  				continue;
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1813  			}
4fc4a09e4cc112 arch/sparc/mm/init_64.c Mike Rapoport   2018-10-30  1814  			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
4fc4a09e4cc112 arch/sparc/mm/init_64.c Mike Rapoport   2018-10-30  1815  						  PAGE_SIZE);
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1816  			alloc_bytes += PAGE_SIZE;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1817  			pud_populate(&init_mm, pud, new);
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1818  		}
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1819  
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1820  		pmd = pmd_offset(pud, vstart);
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1821  		if (pmd_none(*pmd)) {
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1822  			pte_t *new;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1823  
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1824  			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1825  				vstart = kernel_map_hugepmd(vstart, vend, pmd);
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1826  				continue;
0dd5b7b09e13da arch/sparc/mm/init_64.c David S. Miller 2014-09-24  1827  			}
4fc4a09e4cc112 arch/sparc/mm/init_64.c Mike Rapoport   2018-10-30  1828  			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
4fc4a09e4cc112 arch/sparc/mm/init_64.c Mike Rapoport   2018-10-30  1829  						  PAGE_SIZE);
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1830  			alloc_bytes += PAGE_SIZE;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1831  			pmd_populate_kernel(&init_mm, pmd, new);
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1832  		}
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1833  
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1834  		pte = pte_offset_kernel(pmd, vstart);
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1835  		this_end = (vstart + PMD_SIZE) & PMD_MASK;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1836  		if (this_end > vend)
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1837  			this_end = vend;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1838  
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1839  		while (vstart < this_end) {
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1840  			pte_val(*pte) = (paddr | pgprot_val(prot));
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1841  
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1842  			vstart += PAGE_SIZE;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1843  			paddr += PAGE_SIZE;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1844  			pte++;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1845  		}
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1846  	}
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1847  
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1848  	return alloc_bytes;
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1849  }
56425306517ef2 arch/sparc64/mm/init.c  David S. Miller 2005-09-25  1850
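
A plausible reading of the new W=1 warning (not stated in the report, so
treat it as an assumption): with the "if (!new)" check removed by this
patch, the only remaining use of 'new' in the pgd_none() branch is
pgd_populate(), which expands to a no-op when the p4d level is folded, as
it is on sparc64, so GCC now sees the variable as set but never read.
Sketch of the resulting code:

	if (pgd_none(*pgd)) {
		pud_t *new;

		/* panics internally on failure after this series */
		new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		alloc_bytes += PAGE_SIZE;
		pgd_populate(&init_mm, pgd, new); /* no-op with folded p4d */
	}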

Patch

diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index c79912a6b196..4ed597b19388 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -53,10 +53,6 @@  static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 	pte_t *pte_k;
 
 	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!pte_k)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
 	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
 	return pte_k;
 }
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bde7cabd23df..04de02a83564 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -174,11 +174,6 @@  void __init fixrange_init(unsigned long start, unsigned long end,
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-					if (!pte)
-						panic("%s: Failed to allocate %lu bytes align=%lx\n",
-						      __func__, PAGE_SIZE,
-						      PAGE_SIZE);
-
 					set_pmd(pmd, __pmd(__pa(pte)));
 					BUG_ON(pte != pte_offset_kernel(pmd, 0));
 				}
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 922e53bcb853..14f761330b29 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -96,10 +96,6 @@  void __init atari_stram_reserve_pages(void *start_mem)
 		pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n");
 		stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,
 								       PAGE_SIZE);
-		if (!stram_pool.start)
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-			      __func__, pool_size, PAGE_SIZE);
-
 		stram_pool.end = stram_pool.start + pool_size - 1;
 		request_resource(&iomem_resource, &stram_pool);
 		stram_virt_offset = 0;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index ce016ae8c972..83bbada15be2 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -227,11 +227,6 @@  static pte_t * __init kernel_page_table(void)
 
 	if (PAGE_ALIGNED(last_pte_table)) {
 		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-		if (!pte_table) {
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-					__func__, PAGE_SIZE, PAGE_SIZE);
-		}
-
 		clear_page(pte_table);
 		mmu_page_ctor(pte_table);
 
@@ -275,10 +270,6 @@  static pmd_t * __init kernel_ptr_table(void)
 	last_pmd_table += PTRS_PER_PMD;
 	if (PAGE_ALIGNED(last_pmd_table)) {
 		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-		if (!last_pmd_table)
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-			      __func__, PAGE_SIZE, PAGE_SIZE);
-
 		clear_page(last_pmd_table);
 		mmu_page_ctor(last_pmd_table);
 	}
diff --git a/arch/mips/include/asm/dmi.h b/arch/mips/include/asm/dmi.h
index dc397f630c66..9698d072cc4d 100644
--- a/arch/mips/include/asm/dmi.h
+++ b/arch/mips/include/asm/dmi.h
@@ -11,7 +11,7 @@ 
 #define dmi_unmap(x)			iounmap(x)
 
 /* MIPS initialize DMI scan before SLAB is ready, so we use memblock here */
-#define dmi_alloc(l)			memblock_alloc_low(l, PAGE_SIZE)
+#define dmi_alloc(l)			memblock_alloc_low_no_panic(l, PAGE_SIZE)
 
 #if defined(CONFIG_MACH_LOONGSON64)
 #define SMBIOS_ENTRY_POINT_SCAN_START	0xFFFE000
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4583d1a2a73e..cca62f23769f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -257,11 +257,6 @@  void __init fixrange_init(unsigned long start, unsigned long end,
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
 									   PAGE_SIZE);
-					if (!pte)
-						panic("%s: Failed to allocate %lu bytes align=%lx\n",
-						      __func__, PAGE_SIZE,
-						      PAGE_SIZE);
-
 					set_pmd(pmd, __pmd((unsigned long)pte));
 					BUG_ON(pte != pte_offset_kernel(pmd, 0));
 				}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e51426113f26..854d3744dacf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -397,10 +397,6 @@  static void __init setup_lowcore(void)
 	 */
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
 	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
-	if (!lc)
-		panic("%s: Failed to allocate %zu bytes align=%zx\n",
-		      __func__, sizeof(*lc), sizeof(*lc));
-
 	lc->pcpu = (unsigned long)per_cpu_ptr(&pcpu_devices, 0);
 	lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
 	lc->restart_psw.addr = __pa(restart_int_handler);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9eb4508b4ca4..467d4f390837 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -631,9 +631,6 @@  void __init smp_save_dump_secondary_cpus(void)
 		return;
 	/* Allocate a page as dumping area for the store status sigps */
 	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!page)
-		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
-		      PAGE_SIZE, 1UL << 31);
 
 	/* Set multi-threading state to the previous system. */
 	pcpu_set_smt(sclp.mtid_prev);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 05882bca5b73..8c813c755eb8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1789,8 +1789,6 @@  static unsigned long __ref kernel_map_range(unsigned long pstart,
 
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
-			if (!new)
-				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pgd_populate(&init_mm, pgd, new);
 		}
@@ -1801,8 +1799,6 @@  static unsigned long __ref kernel_map_range(unsigned long pstart,
 
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
-			if (!new)
-				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			p4d_populate(&init_mm, p4d, new);
 		}
@@ -1817,8 +1813,6 @@  static unsigned long __ref kernel_map_range(unsigned long pstart,
 			}
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
-			if (!new)
-				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pud_populate(&init_mm, pud, new);
 		}
@@ -1833,8 +1827,6 @@  static unsigned long __ref kernel_map_range(unsigned long pstart,
 			}
 			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
 						  PAGE_SIZE);
-			if (!new)
-				goto err_alloc;
 			alloc_bytes += PAGE_SIZE;
 			pmd_populate_kernel(&init_mm, pmd, new);
 		}
@@ -1854,11 +1846,6 @@  static unsigned long __ref kernel_map_range(unsigned long pstart,
 	}
 
 	return alloc_bytes;
-
-err_alloc:
-	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
-	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-	return -ENOMEM;
 }
 
 static void __init flush_all_kernel_tsbs(void)
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 53248ed04771..9c161fb4ed3a 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -83,10 +83,6 @@  static void __init one_page_table_init(pmd_t *pmd)
 	if (pmd_none(*pmd)) {
 		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
 							  PAGE_SIZE);
-		if (!pte)
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-			      __func__, PAGE_SIZE, PAGE_SIZE);
-
 		set_pmd(pmd, __pmd(_KERNPG_TABLE +
 					   (unsigned long) __pa(pte)));
 		BUG_ON(pte != pte_offset_kernel(pmd, 0));
@@ -97,10 +93,6 @@  static void __init one_md_table_init(pud_t *pud)
 {
 #if CONFIG_PGTABLE_LEVELS > 2
 	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!pmd_table)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
 	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
 	BUG_ON(pmd_table != pmd_offset(pud, 0));
 #endif
@@ -110,10 +102,6 @@  static void __init one_ud_table_init(p4d_t *p4d)
 {
 #if CONFIG_PGTABLE_LEVELS > 3
 	pud_t *pud_table = (pud_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!pud_table)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
 	set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));
 	BUG_ON(pud_table != pud_offset(p4d, 0));
 #endif
@@ -163,10 +151,6 @@  static void __init fixaddr_user_init( void)
 
 	fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
 	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
-	if (!v)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, size, PAGE_SIZE);
-
 	memcpy((void *) v , (void *) FIXADDR_USER_START, size);
 	p = __pa(v);
 	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
@@ -184,10 +168,6 @@  void __init paging_init(void)
 
 	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
 							       PAGE_SIZE);
-	if (!empty_zero_page)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
 	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
 	free_area_init(max_zone_pfn);
 
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 92e158c69c10..aee020c986a3 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -33,10 +33,6 @@  static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
 		 __func__, vaddr, n_pages);
 
 	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
-	if (!pte)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
-
 	for (i = 0; i < n_pages; ++i)
 		pte_clear(NULL, 0, pte + i);
 
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b68c141ebc44..3f940bf628a9 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -430,20 +430,22 @@  void *__memblock_alloc_panic(phys_addr_t size, phys_addr_t align,
 #define memblock_alloc_raw_no_panic(size, align)    \
 	 __memblock_alloc_panic(size, align, __func__, false, true)
 
-static inline void *memblock_alloc_from(phys_addr_t size,
-						phys_addr_t align,
-						phys_addr_t min_addr)
-{
-	return memblock_alloc_try_nid(size, align, min_addr,
-				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
-}
-
-static inline void *memblock_alloc_low(phys_addr_t size,
-					       phys_addr_t align)
-{
-	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
-				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
-}
+void *__memblock_alloc_from_panic(phys_addr_t size, phys_addr_t align,
+				  phys_addr_t min_addr,const char *func,
+				  bool should_panic);
+
+#define memblock_alloc_from(size, align, min_addr)    \
+	 __memblock_alloc_from_panic(size, align, min_addr,  __func__, true)
+#define memblock_alloc_from_no_panic(size, align, min_addr)    \
+	 __memblock_alloc_from_panic(size, align, min_addr, __func__, false)
+
+void *__memblock_alloc_low_panic(phys_addr_t size, phys_addr_t align,
+				 const char *func, bool should_panic);
+
+#define memblock_alloc_low(size, align)    \
+	 __memblock_alloc_low_panic(size, align, __func__, true)
+#define memblock_alloc_low_no_panic(size, align)    \
+	 __memblock_alloc_low_panic(size, align, __func__, false)
 
 static inline void *memblock_alloc_node(phys_addr_t size,
 						phys_addr_t align, int nid)
diff --git a/mm/memblock.c b/mm/memblock.c
index 4974ae2ee5ec..22922c81ff77 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1722,6 +1722,53 @@  void *__init __memblock_alloc_panic(phys_addr_t size, phys_addr_t align,
 	return addr;
 }
 
+/**
+ * __memblock_alloc_from_panic - Try to allocate memory and panic on failure
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region from where the allocation
+ *	  is preferred (phys address)
+ * @func: caller func name
+ * @should_panic: whether to panic on allocation failure
+ *
+ * In case of failure, panic() is called when @should_panic is true.
+ * This function should not be used directly; use the macros
+ * memblock_alloc_from() and memblock_alloc_from_no_panic() instead.
+ */
+void *__init __memblock_alloc_from_panic(phys_addr_t size, phys_addr_t align,
+				    phys_addr_t min_addr, const char *func,
+				    bool should_panic)
+{
+	void *addr = memblock_alloc_try_nid(size, align, min_addr,
+				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+
+	if (unlikely(!addr && should_panic))
+		panic("%s: Failed to allocate %pap bytes\n", func, &size);
+	return addr;
+}
+
+/**
+ * __memblock_alloc_low_panic - Try to allocate memory and panic on failure
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @func: caller func name
+ * @should_panic: whether to panic on allocation failure
+ *
+ * In case of failure, panic() is called when @should_panic is true.
+ * This function should not be used directly; use the macros
+ * memblock_alloc_low() and memblock_alloc_low_no_panic() instead.
+ */
+void *__init __memblock_alloc_low_panic(phys_addr_t size, phys_addr_t align,
+					const char *func,bool should_panic)
+{
+	void *addr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+
+	if (unlikely(!addr && should_panic))
+		panic("%s: Failed to allocate %pap bytes\n", func, &size);
+	return addr;
+}
+
 /**
  * memblock_free_late - free pages directly to buddy allocator
  * @base: phys starting address of the  boot memory block
diff --git a/mm/percpu.c b/mm/percpu.c
index a381d626ed32..980fba4292be 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2933,7 +2933,7 @@  static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
 		node = cpu_to_nd_fn(cpu);
 
 	if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
-		ptr = memblock_alloc_from(size, align, goal);
+		ptr = memblock_alloc_from_no_panic(size, align, goal);
 		pr_info("cpu %d has no node %d or node-local memory\n",
 			cpu, node);
 		pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
@@ -2948,7 +2948,7 @@  static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
 	}
 	return ptr;
 #else
-	return memblock_alloc_from(size, align, goal);
+	return memblock_alloc_from_no_panic(size, align, goal);
 #endif
 }
 
@@ -3318,7 +3318,7 @@  void __init setup_per_cpu_areas(void)
 
 	ai = pcpu_alloc_alloc_info(1, 1);
 	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-	if (!ai || !fc)
+	if (!ai)
 		panic("Failed to allocate memory for percpu areas.");
 	/* kmemleak tracks the percpu allocations separately */
 	kmemleak_ignore_phys(__pa(fc));
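
For readers skimming the interface change, a minimal usage sketch under
the semantics introduced by this series (a hedged illustration; 'size',
'align' and 'goal' are placeholder variables, not taken from the patch):

	/* default: panics internally on failure, no NULL check needed */
	pte = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	/*
	 * Opt out: the *_no_panic() variants return NULL on failure so
	 * the caller can handle it, as mm/percpu.c now does.
	 */
	ptr = memblock_alloc_from_no_panic(size, align, goal);
	if (!ptr)
		pr_warn("memblock allocation failed, falling back\n");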