[v4,14/15] mm: slab: Reduce the kmalloc() minimum alignment if DMA bouncing possible

Message ID 20230518173403.1150549-15-catalin.marinas@arm.com (mailing list archive)
State New, archived
Series mm, dma, arm64: Reduce ARCH_KMALLOC_MINALIGN to 8

Commit Message

Catalin Marinas May 18, 2023, 5:34 p.m. UTC
If an architecture has opted in to DMA bouncing of unaligned kmalloc()
buffers (ARCH_WANT_KMALLOC_DMA_BOUNCE), reduce the minimum kmalloc()
cache alignment from the cache-line size down to ARCH_KMALLOC_MINALIGN.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Robin Murphy <robin.murphy@arm.com>
---
 mm/slab_common.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
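
For context, the value returned by __kmalloc_minalign() is consumed when the
kmalloc caches are created: if it is larger than ARCH_KMALLOC_MINALIGN, the
small caches are rounded up to, and aliased with, the first cache whose size
meets that alignment. A rough sketch of that consumer, paraphrasing the
earlier patch in this series (simplified, unrelated flag handling omitted):

	void __init
	new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
	{
		unsigned int minalign = __kmalloc_minalign();
		unsigned int aligned_size = kmalloc_info[idx].size;
		int aligned_idx = idx;

		if (minalign > ARCH_KMALLOC_MINALIGN) {
			/* round the object size up to the DMA-safe alignment */
			aligned_size = ALIGN(aligned_size, minalign);
			aligned_idx = __kmalloc_index(aligned_size, false);
		}

		/* smaller caches simply alias the sufficiently aligned one */
		if (!kmalloc_caches[type][aligned_idx])
			kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
						kmalloc_info[aligned_idx].name[type],
						aligned_size, flags);
		if (idx != aligned_idx)
			kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
	}

With this patch, __kmalloc_minalign() only returns the full DMA cache-line
alignment when bouncing is unavailable, so the rounding above no longer pads
small allocations on systems that can bounce unaligned DMA buffers instead.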

Comments

Catalin Marinas May 19, 2023, 11 a.m. UTC | #1
On Thu, May 18, 2023 at 06:34:02PM +0100, Catalin Marinas wrote:
> If an architecture has opted in to DMA bouncing of unaligned kmalloc()
> buffers (ARCH_WANT_KMALLOC_DMA_BOUNCE), reduce the minimum kmalloc()
> cache alignment from the cache-line size down to ARCH_KMALLOC_MINALIGN.
> 
> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Robin Murphy <robin.murphy@arm.com>
> ---
>  mm/slab_common.c | 9 ++++++++-
>  1 file changed, 8 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 7c6475847fdf..84e5a5e435d6 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -18,6 +18,7 @@
>  #include <linux/uaccess.h>
>  #include <linux/seq_file.h>
>  #include <linux/dma-mapping.h>
> +#include <linux/swiotlb.h>
>  #include <linux/proc_fs.h>
>  #include <linux/debugfs.h>
>  #include <linux/kasan.h>
> @@ -865,7 +866,13 @@ void __init setup_kmalloc_cache_index_table(void)
>  
>  static unsigned int __kmalloc_minalign(void)
>  {
> -	return dma_get_cache_alignment();
> +	int cache_align = dma_get_cache_alignment();
> +
> +	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) ||
> +	    io_tlb_default_mem.nslabs == 0)
> +		return cache_align;
> +
> +	return ARCH_KMALLOC_MINALIGN;
>  }

This gives a build error if the architecture doesn't select SWIOTLB (I
had this done properly in v3 but for some reason I rewrote it here). The
fixup is to add #ifdefs. I'll fold this in for v5:

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 84e5a5e435d6..fe46459a8b77 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -864,16 +864,19 @@ void __init setup_kmalloc_cache_index_table(void)
 	}
 }
 
+#ifdef CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC
 static unsigned int __kmalloc_minalign(void)
 {
-	int cache_align = dma_get_cache_alignment();
-
-	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) ||
-	    io_tlb_default_mem.nslabs == 0)
-		return cache_align;
-
-	return ARCH_KMALLOC_MINALIGN;
+	if (io_tlb_default_mem.nslabs)
+		return ARCH_KMALLOC_MINALIGN;
+	return dma_get_cache_alignment();
 }
+#else
+static unsigned int __kmalloc_minalign(void)
+{
+	return dma_get_cache_alignment();
+}
+#endif
 
 void __init
 new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
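
(For reference, the reason an IS_ENABLED() check is not enough here: the
default SWIOTLB pool is only declared when SWIOTLB itself is built in,
roughly along these lines in <linux/swiotlb.h> (paraphrased sketch, not the
exact header):

	#ifdef CONFIG_SWIOTLB
	struct io_tlb_mem {
		unsigned long nslabs;
		/* ... rest of the bounce buffer pool state ... */
	};
	extern struct io_tlb_mem io_tlb_default_mem;
	#endif

so a reference to io_tlb_default_mem.nslabs has to be compiled out with
#ifdef rather than merely made unreachable when the architecture does not
select SWIOTLB.)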

Patch

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7c6475847fdf..84e5a5e435d6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -18,6 +18,7 @@ 
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
 #include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
 #include <linux/proc_fs.h>
 #include <linux/debugfs.h>
 #include <linux/kasan.h>
@@ -865,7 +866,13 @@ void __init setup_kmalloc_cache_index_table(void)
 
 static unsigned int __kmalloc_minalign(void)
 {
-	return dma_get_cache_alignment();
+	int cache_align = dma_get_cache_alignment();
+
+	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) ||
+	    io_tlb_default_mem.nslabs == 0)
+		return cache_align;
+
+	return ARCH_KMALLOC_MINALIGN;
 }
 
 void __init
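
As an illustration of the architecture side (hypothetical sketch, not part of
this patch): an architecture that opts in keeps its worst-case DMA alignment
for non-coherent devices while letting kmalloc() caches drop to the smallest
natural alignment, e.g. the end state this series aims for on arm64:

	/* sketch of arch/arm64/include/asm/cache.h after the series */
	#define ARCH_DMA_MINALIGN	(128)	/* still the worst-case cache line for DMA */
	#define ARCH_KMALLOC_MINALIGN	(8)	/* kmalloc() may return 8-byte aligned objects */

Unaligned kmalloc() buffers that do end up in non-coherent DMA are then
bounced through SWIOTLB instead of being padded at allocation time.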