@@ -67,7 +67,7 @@
/*
* Mutex protects:
- * 1) List of modules (also safely readable with preempt_disable),
+ * 1) List of modules (also safely readable within an RCU read section),
* 2) module_use links,
* 3) mod_tree.addr_min/mod_tree.addr_max.
* (delete and add uses RCU list operations).
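For illustration only (not part of the patch, and the helper name is invented): under the updated rule above, a reader that walks the modules list needs nothing more than an RCU read section where it previously relied on preempt_disable(). A minimal sketch, assuming it lives in kernel/module/main.c where the static modules list is visible:

static int count_live_modules(void)
{
	struct module *mod;
	int n = 0;

	guard(rcu)();	/* RCU read section instead of preempt_disable()/preempt_enable() */
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		n++;
	}
	return n;
}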
@@ -1348,7 +1348,7 @@ static void free_module(struct module *mod)
mod_tree_remove(mod);
/* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod);
- /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
+	/* Wait for RCU synchronization before releasing mod->list and buglist. */
synchronize_rcu();
if (try_add_tainted_module(mod))
pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
@@ -2965,7 +2965,7 @@ static noinline int do_init_module(struct module *mod)
#endif
/*
* We want to free module_init, but be aware that kallsyms may be
- * walking this with preempt disabled. In all the failure paths, we
+ * walking this within an RCU read section. In all the failure paths, we
* call synchronize_rcu(), but we don't want to slow down the success
* path. execmem_free() cannot be called in an interrupt, so do the
* work and call synchronize_rcu() in a work queue.
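A minimal sketch of the deferral described in that comment, with made-up struct and function names (the real code has its own helpers): the work item performs the RCU wait and the execmem_free() off the success path, since execmem_free() must not run in interrupt context.

struct init_free_req {
	struct work_struct work;
	void *init_text;		/* module_init memory to release */
};

static void init_free_workfn(struct work_struct *work)
{
	struct init_free_req *req = container_of(work, struct init_free_req, work);

	synchronize_rcu();		/* kallsyms may still be walking the init area */
	execmem_free(req->init_text);	/* not callable from interrupt context */
	kfree(req);
}

The success path would then only fill in the request and, after INIT_WORK(&req->work, init_free_workfn), call schedule_work(&req->work) instead of paying for synchronize_rcu() inline.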
@@ -3754,7 +3754,7 @@ void print_modules(void)
printk(KERN_DEFAULT "Modules linked in:");
-	/* Most callers should already have preempt disabled, but make sure */
-	preempt_disable();
+	guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -3762,7 +3762,6 @@ void print_modules(void)
}
print_unloaded_tainted_modules();
- preempt_enable();
if (last_unloaded_module.name[0])
pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name,
last_unloaded_module.taints);
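Shown in isolation with an invented function name: guard(rcu)() enters an RCU read section and leaves it automatically on every exit path, which is why the explicit preempt_enable() at the bottom of print_modules() can simply be dropped rather than replaced.

static bool any_module_tainted_example(void)
{
	struct module *mod;

	guard(rcu)();			/* rcu_read_lock() */
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->taints)
			return true;	/* read section ends automatically here */
	}
	return false;			/* ...and here */
}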
@@ -12,11 +12,11 @@
/*
* Use a latched RB-tree for __module_address(); this allows us to use
- * RCU-sched lookups of the address from any context.
+ * RCU lookups of the address from any context.
*
- * This is conditional on PERF_EVENTS || TRACING because those can really hit
- * __module_address() hard by doing a lot of stack unwinding; potentially from
- * NMI context.
+ * This is conditional on PERF_EVENTS || TRACING || CFI_CLANG because those can
+ * really hit __module_address() hard by doing a lot of stack unwinding;
+ * potentially from NMI context.
*/
static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
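A hedged sketch of the lookup this enables, reusing the mod_tree, mod_tree_ops and struct mod_tree_node names that already exist around this code (the example function itself is invented): because the tree is latched, a reader only needs to be inside an RCU read section, even when the unwinder calls in from NMI context.

static struct module *mod_addr_lookup_example(unsigned long addr)
{
	struct latch_tree_node *ltn;

	/* caller must already be in an RCU read section; NMI context is fine */
	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);

	return ltn ? container_of(ltn, struct mod_tree_node, node)->mod : NULL;
}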