@@ -98,6 +98,9 @@ static inline void asi_init_thread_state(struct thread_struct *thread)
thread->intr_nest_depth = 0;
}
+int asi_load_module(struct module *module);
+void asi_unload_module(struct module *module);
+
static inline void asi_set_target_unrestricted(void)
{
if (static_cpu_has(X86_FEATURE_ASI)) {
@@ -5,6 +5,7 @@
#include <linux/memcontrol.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/asi.h>
#include <asm/pgalloc.h>
@@ -308,6 +309,71 @@ static int __init set_asi_param(char *str)
}
early_param("asi", set_asi_param);
+/* asi_load_module() is called from layout_and_allocate() in kernel/module.c.
+ * We map the module's non-sensitive sections in init_mm.asi_pgd[0].
+ */
+int asi_load_module(struct module *module)
+{
+	int err = 0;
+
+	/* Map the module text and rodata, up to ro_after_init_size */
+	err = asi_map(ASI_GLOBAL_NONSENSITIVE,
+		      module->core_layout.base,
+		      module->core_layout.ro_after_init_size);
+	if (err)
+		return err;
+
+	/* Map global variables annotated as non-sensitive for ASI */
+	err = asi_map(ASI_GLOBAL_NONSENSITIVE,
+		      module->core_layout.base +
+		      module->core_layout.asi_section_offset,
+		      module->core_layout.asi_section_size);
+	if (err)
+		return err;
+
+	/* Map read-mostly globals annotated as non-sensitive for ASI */
+	err = asi_map(ASI_GLOBAL_NONSENSITIVE,
+		      module->core_layout.base +
+		      module->core_layout.asi_readmostly_section_offset,
+		      module->core_layout.asi_readmostly_section_size);
+	if (err)
+		return err;
+
+	/* Map the .data.once section as well */
+	err = asi_map(ASI_GLOBAL_NONSENSITIVE,
+		      module->core_layout.base +
+		      module->core_layout.once_section_offset,
+		      module->core_layout.once_section_size);
+	if (err)
+		return err;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(asi_load_module);
+
+/* Undo the asi_map() calls made by asi_load_module(). */
+void asi_unload_module(struct module *module)
+{
+	asi_unmap(ASI_GLOBAL_NONSENSITIVE,
+		  module->core_layout.base,
+		  module->core_layout.ro_after_init_size, true);
+
+	asi_unmap(ASI_GLOBAL_NONSENSITIVE,
+		  module->core_layout.base +
+		  module->core_layout.asi_section_offset,
+		  module->core_layout.asi_section_size, true);
+
+	asi_unmap(ASI_GLOBAL_NONSENSITIVE,
+		  module->core_layout.base +
+		  module->core_layout.asi_readmostly_section_offset,
+		  module->core_layout.asi_readmostly_section_size, true);
+
+	asi_unmap(ASI_GLOBAL_NONSENSITIVE,
+		  module->core_layout.base +
+		  module->core_layout.once_section_offset,
+		  module->core_layout.once_section_size, true);
+}
+
static int __init asi_global_init(void)
{
uint i, n;
@@ -120,6 +120,7 @@ void asi_flush_tlb_range(struct asi *asi, void *addr, size_t len) { }
#define static_asi_enabled() false
+static inline int asi_load_module(struct module *module) { return 0; }
/* IMPORTANT: Any modification to the name here should also be applied to
* include/asm-generic/vmlinux.lds.h */
@@ -127,6 +128,8 @@ void asi_flush_tlb_range(struct asi *asi, void *addr, size_t len) { }
#define __asi_not_sensitive
#define __asi_not_sensitive_readmostly
+static inline void asi_unload_module(struct module *module) { }
+
#endif /* !_ASSEMBLY_ */
#endif /* !CONFIG_ADDRESS_SPACE_ISOLATION */
@@ -336,6 +336,15 @@ struct module_layout {
#ifdef CONFIG_MODULES_TREE_LOOKUP
struct mod_tree_node mtn;
#endif
+
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+ unsigned int asi_section_offset;
+ unsigned int asi_section_size;
+ unsigned int asi_readmostly_section_offset;
+ unsigned int asi_readmostly_section_size;
+ unsigned int once_section_offset;
+ unsigned int once_section_size;
+#endif
};
#ifdef CONFIG_MODULES_TREE_LOOKUP
@@ -2159,6 +2159,8 @@ static void free_module(struct module *mod)
{
trace_module_free(mod);
+ asi_unload_module(mod);
+
mod_sysfs_teardown(mod);
/*
@@ -2416,6 +2418,31 @@ static bool module_init_layout_section(const char *sname)
return module_init_section(sname);
}
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+/* Record offset/size of the ASI-mapped sections (sh_entsize holds the
+ * core-layout offset assigned by get_offset() at this point). */
+static void asi_record_sections_layout(struct module *mod,
+				       const char *sname,
+				       Elf_Shdr *s)
+{
+	if (strstarts(sname, ASI_NON_SENSITIVE_READ_MOSTLY_SECTION_NAME)) {
+		mod->core_layout.asi_readmostly_section_offset = s->sh_entsize;
+		mod->core_layout.asi_readmostly_section_size = s->sh_size;
+	} else if (strstarts(sname, ASI_NON_SENSITIVE_SECTION_NAME)) {
+		mod->core_layout.asi_section_offset = s->sh_entsize;
+		mod->core_layout.asi_section_size = s->sh_size;
+	}
+	if (strstarts(sname, ".data.once")) {
+		mod->core_layout.once_section_offset = s->sh_entsize;
+		mod->core_layout.once_section_size = s->sh_size;
+	}
+}
+#else
+static inline void asi_record_sections_layout(struct module *mod,
+					      const char *sname,
+					      Elf_Shdr *s)
+{}
+#endif
+
/*
* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
* might -- code, read-only data, read-write data, small data. Tally
@@ -2453,6 +2480,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
|| module_init_layout_section(sname))
continue;
s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
+ asi_record_sections_layout(mod, sname, s);
pr_debug("\t%s\n", sname);
}
switch (m) {
@@ -3558,6 +3586,25 @@ static bool blacklisted(const char *module_name)
}
core_param(module_blacklist, module_blacklist, charp, 0400);
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+/* Page-align a section so it can be asi_map()'ed on its own. */
+static void asi_fix_section_size_and_alignment(struct load_info *info,
+					       const char *section_to_fix)
+{
+	unsigned int ndx = find_sec(info, section_to_fix);
+
+	if (!ndx)
+		return;
+
+	info->sechdrs[ndx].sh_addralign = PAGE_SIZE;
+	info->sechdrs[ndx].sh_size = ALIGN(info->sechdrs[ndx].sh_size, PAGE_SIZE);
+}
+#else
+static inline void asi_fix_section_size_and_alignment(struct load_info *info,
+						      const char *section_to_fix)
+{}
+#endif
+
+
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
struct module *mod;
@@ -3600,6 +3647,15 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
if (ndx)
info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+	/* These sections will be mapped into an ASI page table, so
+	 * they need to be aligned to PAGE_SIZE. */
+ asi_fix_section_size_and_alignment(info, ASI_NON_SENSITIVE_SECTION_NAME);
+ asi_fix_section_size_and_alignment(info,
+ ASI_NON_SENSITIVE_READ_MOSTLY_SECTION_NAME);
+ asi_fix_section_size_and_alignment(info, ".data.once");
+#endif
+
/*
* Determine total sizes, and put offsets in sh_entsize. For now
* this is done generically; there doesn't appear to be any
@@ -4127,6 +4183,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
/* Get rid of temporary copy. */
free_copy(info);
+ asi_load_module(mod);
+
/* Done! */
trace_module_load(mod);