@@ -17,6 +17,7 @@
 #include <linux/cpu.h>
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
+#include <linux/dma-mapping.h>
 #include <linux/proc_fs.h>
 #include <linux/debugfs.h>
 #include <linux/kasan.h>
@@ -862,9 +863,24 @@ void __init setup_kmalloc_cache_index_table(void)
 	}
 }
 
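+/*
+ * kmalloc() buffers may be passed to the DMA API, and on systems with
+ * non-coherent DMA they must not share cache lines with unrelated data.
+ * dma_get_cache_alignment() returns the alignment this requires:
+ * ARCH_DMA_MINALIGN where defined, otherwise 1.
+ */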
+static unsigned int __kmalloc_minalign(void)
+{
+	return dma_get_cache_alignment();
+}
+
 void __init
 new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 {
+	unsigned int minalign = __kmalloc_minalign();
+	unsigned int aligned_size = kmalloc_info[idx].size;
+	int aligned_idx = idx;
+
 	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
 		flags |= SLAB_RECLAIM_ACCOUNT;
 	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
@@ -877,9 +893,29 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 		flags |= SLAB_CACHE_DMA;
 	}
 
-	kmalloc_caches[type][idx] = create_kmalloc_cache(
-					kmalloc_info[idx].name[type],
-					kmalloc_info[idx].size, flags);
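+	/*
+	 * If DMA needs more alignment than the compile-time minimum,
+	 * round this size class up to the next one that provides it.
+	 */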
+	if (minalign > ARCH_KMALLOC_MINALIGN) {
+		aligned_size = ALIGN(aligned_size, minalign);
+		aligned_idx = __kmalloc_index(aligned_size, false);
+	}
+
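+	/*
+	 * Several small size classes can round up to the same aligned
+	 * index; create the backing cache only once.
+	 */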
+	if (!kmalloc_caches[type][aligned_idx])
+		kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
+					kmalloc_info[aligned_idx].name[type],
+					aligned_size, flags);
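+	/*
+	 * Alias the original index to the aligned cache so smaller
+	 * requests transparently get DMA-safe alignment.
+	 */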
+	if (idx != aligned_idx)
+		kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
 
 	/*
 	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for