@@ -102,6 +102,7 @@ config ARM64
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
+ select ARCH_WANT_KMALLOC_DMA_BOUNCE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_NO_INSTR
select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
@@ -442,7 +442,12 @@ void __init bootmem_init(void)
*/
void __init mem_init(void)
{
- swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);
+ bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
+
+ if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+ swiotlb = true;
+
+ swiotlb_init(swiotlb, SWIOTLB_VERBOSE);

/* this will put all unused low memory onto the freelists */
memblock_free_all();
With the DMA bouncing of unaligned kmalloc() buffers now in place, enable
it for arm64 to allow the kmalloc-{8,16,32,48,96} caches. In addition,
always create the swiotlb buffer even when the end of RAM is within the
32-bit physical address range (the swiotlb buffer can still be disabled on
the kernel command line).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/Kconfig   | 1 +
 arch/arm64/mm/init.c | 7 ++++++-
 2 files changed, 7 insertions(+), 1 deletion(-)
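
For illustration only (not part of the patch; the device handle and the
function are made up): a minimal sketch of the driver pattern this change
targets, where a kmalloc() buffer smaller than the cache line is mapped for
inbound DMA. On a non-coherent device such a mapping would otherwise risk
corrupting neighbouring slab objects on cache invalidation; with
CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC the streaming DMA API bounces it through
the swiotlb buffer instead, which is why the patch always initialises
swiotlb.

    #include <linux/slab.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical receive path on a non-coherent device. */
    static int example_rx(struct device *dev)
    {
    	void *buf = kmalloc(32, GFP_KERNEL);	/* may come from kmalloc-32 */
    	dma_addr_t addr;

    	if (!buf)
    		return -ENOMEM;

    	/*
    	 * Sub-cache-line buffer mapped for device writes; with the new
    	 * option this mapping may be bounced via swiotlb rather than
    	 * handed to the device in place.
    	 */
    	addr = dma_map_single(dev, buf, 32, DMA_FROM_DEVICE);
    	if (dma_mapping_error(dev, addr)) {
    		kfree(buf);
    		return -ENOMEM;
    	}

    	/* ... start the DMA and wait for completion ... */

    	dma_unmap_single(dev, addr, 32, DMA_FROM_DEVICE);
    	kfree(buf);
    	return 0;
    }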