--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -38,7 +38,7 @@
#endif
#ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
gfp_t gfp_mask = GFP_KERNEL;
void *p;
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -30,7 +30,7 @@
#include <asm/insn.h>
#include <asm/sections.h>
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
gfp_t gfp_mask = GFP_KERNEL;
void *p;
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -45,7 +45,7 @@ static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
#ifdef MODULE_START
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
--- a/arch/nds32/kernel/module.c
+++ b/arch/nds32/kernel/module.c
@@ -7,7 +7,7 @@
#include <linux/moduleloader.h>
#include <asm/pgtable.h>
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
--- a/arch/nios2/kernel/module.c
+++ b/arch/nios2/kernel/module.c
@@ -28,7 +28,7 @@
* from 0x80000000 (vmalloc area) to 0xc00000000 (kernel) (kmalloc returns
* addresses in 0xc0000000)
*/
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
}
/* Free memory returned from module_alloc */
-void module_memfree(void *module_region)
+void arch_module_memfree(void *module_region)
{
kfree(module_region);
}
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -213,7 +213,7 @@ static inline int reassemble_22(int as22)
((as22 & 0x0003ff) << 3));
}
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
/* using RWX means less protection for modules, but it's
* easier than trying to map the text, data, init_text and
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -30,7 +30,7 @@
#define PLT_ENTRY_SIZE 20
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -40,7 +40,7 @@ static void *module_map(unsigned long size)
}
#endif /* CONFIG_SPARC64 */
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
void *ret;
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -22,7 +22,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -77,7 +77,7 @@ static unsigned long int get_module_load_offset(void)
}
#endif
-void *module_alloc(unsigned long size)
+void *arch_module_alloc(unsigned long size)
{
void *p;
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2110,11 +2110,16 @@ static void free_module_elf(struct module *mod)
}
#endif /* CONFIG_LIVEPATCH */
-void __weak module_memfree(void *module_region)
+void __weak arch_module_memfree(void *module_region)
{
vfree(module_region);
}
+void module_memfree(void *module_region)
+{
+ arch_module_memfree(module_region);
+}
+
void __weak module_arch_cleanup(struct module *mod)
{
}
@@ -2728,11 +2733,16 @@ static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
ddebug_remove_module(mod->name);
}
-void * __weak module_alloc(unsigned long size)
+void * __weak arch_module_alloc(unsigned long size)
{
return vmalloc_exec(size);
}
+void *module_alloc(unsigned long size)
+{
+ return arch_module_alloc(size);
+}
+
#ifdef CONFIG_DEBUG_KMEMLEAK
static void kmemleak_load_module(const struct module *mod,
const struct load_info *info)
In prep for a module space rlimit, create a single cross-platform
module_alloc and module_memfree that call into arch-specific
implementations. This has only been tested on x86.

Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
---
 arch/arm/kernel/module.c       |  2 +-
 arch/arm64/kernel/module.c     |  2 +-
 arch/mips/kernel/module.c      |  2 +-
 arch/nds32/kernel/module.c     |  2 +-
 arch/nios2/kernel/module.c     |  4 ++--
 arch/parisc/kernel/module.c    |  2 +-
 arch/s390/kernel/module.c      |  2 +-
 arch/sparc/kernel/module.c     |  2 +-
 arch/unicore32/kernel/module.c |  2 +-
 arch/x86/kernel/module.c       |  2 +-
 kernel/module.c                | 14 ++++++++++++--
 11 files changed, 23 insertions(+), 13 deletions(-)
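With module_alloc() reduced to a single generic entry point, the planned
rlimit accounting would only need to touch kernel/module.c; each
architecture overrides the __weak arch_module_alloc() default simply by
providing a strong definition, as the per-arch hunks above do. As a
rough sketch of where such a check could slot in, assuming hypothetical
module_space_charge()/module_space_uncharge() helpers that are not part
of this series:

void *module_alloc(unsigned long size)
{
	void *p;

	/* hypothetical rlimit accounting, not in this patch */
	if (!module_space_charge(size))
		return NULL;

	p = arch_module_alloc(size);
	if (!p)
		/* allocation failed, release the charged space */
		module_space_uncharge(size);

	return p;
}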