--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -17,6 +17,7 @@
 #ifdef CONFIG_SLOW_WORK
 
 #include <linux/sysctl.h>
+#include <linux/module.h>
 
 struct slow_work;
 
@@ -42,6 +43,7 @@ struct slow_work_ops {
  *   queued
  */
 struct slow_work {
+	struct module		*owner;
 	unsigned long		flags;
 #define SLOW_WORK_PENDING	0	/* item pending (further) execution */
 #define SLOW_WORK_EXECUTING	1	/* item currently executing */
@@ -61,6 +63,7 @@ struct slow_work {
 static inline void slow_work_init(struct slow_work *work,
 				  const struct slow_work_ops *ops)
 {
+	work->owner = THIS_MODULE;
 	work->flags = 0;
 	work->ops = ops;
 	INIT_LIST_HEAD(&work->link);
@@ -78,6 +81,7 @@ static inline void slow_work_init(struct slow_work *work,
 static inline void vslow_work_init(struct slow_work *work,
 				   const struct slow_work_ops *ops)
 {
+	work->owner = THIS_MODULE;
 	work->flags = 1 << SLOW_WORK_VERY_SLOW;
 	work->ops = ops;
 	INIT_LIST_HEAD(&work->link);
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -220,6 +220,7 @@ static bool slow_work_execute(void)
 	}
 
 	work->ops->put_ref(work);
+	module_put(work->owner);
 	return true;
 
 auto_requeue:
@@ -299,6 +300,8 @@ int slow_work_enqueue(struct slow_work *work)
 		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
 			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
 		} else {
+			if (!try_module_get(work->owner))
+				goto cant_get_mod;
 			if (work->ops->get_ref(work) < 0)
 				goto cant_get_ref;
 			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
@@ -313,6 +316,8 @@ int slow_work_enqueue(struct slow_work *work)
 	return 0;
 
 cant_get_ref:
+	module_put(work->owner);
+cant_get_mod:
 	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
 	return -EAGAIN;
 }
The slow_work facility was designed to use reference counting instead of
barriers for synchronization. The reference counting mechanism is
implemented as vtable ops (->get_ref, ->put_ref). This is problematic for
module use of the slow_work facility because it is impossible to
synchronize against the .text installed in the callbacks: there is no way
to ensure that the slow-work threads have completely exited the text in
question, and rmmod may yank it out from under a slow_work thread.

This patch addresses the issue by transparently adding a "struct module
*owner" to the slow_work item and maintaining a module reference count
coincident with the more externally visible item reference count. Since
the slow_work facility is resident in the core kernel, it is a race-free
location from which to issue the final module_put() call. This ensures
that modules can properly clean up before exiting.

A try_module_get()/module_put() pair on slow_work_enqueue() and the
subsequent dequeue technically adds the overhead of two atomic operations
for every work item scheduled. However, slow_work is designed for
deferring relatively long-running and/or sleepy tasks in the first place,
so this overhead will hopefully be negligible.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
CC: David Howells <dhowells@redhat.com>
---
 include/linux/slow-work.h |    4 ++++
 kernel/slow-work.c        |    5 +++++
 2 files changed, 9 insertions(+), 0 deletions(-)
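For illustration only (placed after the "---" so it stays out of the
commit message): a minimal sketch of a module-side user of slow_work
with this patch applied, assuming the 2.6.30-era API in which
slow_work_register_user() takes no arguments. All of the my_* names are
hypothetical and the get_ref/put_ref bodies are deliberately simplified.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/module.h>
#include <linux/slow-work.h>

static struct slow_work my_work;

static int my_get_ref(struct slow_work *work)
{
	return 0;	/* e.g. take a ref on the enclosing object */
}

static void my_put_ref(struct slow_work *work)
{
	/* drop the ref taken in my_get_ref() */
}

static void my_execute(struct slow_work *work)
{
	/* the long-running / sleepy task runs here, in module .text */
}

static const struct slow_work_ops my_ops = {
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

static int __init my_init(void)
{
	int ret;

	ret = slow_work_register_user();
	if (ret < 0)
		return ret;

	/* slow_work_init() records THIS_MODULE in work->owner */
	slow_work_init(&my_work, &my_ops);

	/* enqueue now pins the module via try_module_get(work->owner) */
	ret = slow_work_enqueue(&my_work);
	if (ret < 0)
		slow_work_unregister_user();
	return ret;
}

static void __exit my_exit(void)
{
	/*
	 * With this patch the module refcount stays elevated until the
	 * item has been dequeued and its final put_ref() has run, so
	 * rmmod cannot free the callbacks above while kslowd is still
	 * executing them.
	 */
	slow_work_unregister_user();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Because the owner is captured at init time and the facility itself drops
the pin (module_put() after the final put_ref()), the unload path can no
longer yank my_execute()'s .text out from under a slow_work thread, which
is exactly the race described above.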