@@ -21,4 +21,5 @@
#include <linux/time.h>
#include <linux/workqueue.h>
+#include <linux/kref.h>
#include <scsi/scsi_dh.h>
#include <asm/atomic.h>
@@ -34,4 +35,5 @@
struct priority_group *pg; /* Owning PG */
unsigned fail_count; /* Cumulative failure count */
+ struct kref ref_count;
struct path path;
@@ -129,4 +131,5 @@
memset(pgpath, 0, sizeof(*pgpath));
pgpath->path.is_active = 1;
+ kref_init(&pgpath->ref_count);
INIT_WORK(&pgpath->activate_path, activate_path, pgpath);
}
@@ -140,4 +144,11 @@
}
+static void release_pgpath(struct kref *kref)
+{
+ struct pgpath *pgpath = container_of(kref, struct pgpath, ref_count);
+
+ free_pgpath(pgpath);
+}
+
static struct priority_group *alloc_priority_group(void)
{
@@ -164,5 +175,5 @@
scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
- free_pgpath(pgpath);
+ kref_put(&pgpath->ref_count, release_pgpath);
}
}
@@ -449,6 +460,8 @@
if (queue_delayed_work(kmpath_handlerd,
&tmp->activate_path, m->pg_init_delay ?
- m->pg_init_delay_secs * HZ : 0))
+ m->pg_init_delay_secs * HZ : 0)) {
+ kref_get(&tmp->ref_count);
m->pg_init_in_progress++;
+ }
}
}
@@ -971,6 +984,8 @@
queue_work(kmultipathd, &m->process_queued_ios);
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
- if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+ if (queue_work(kmpath_handlerd, &pgpath->activate_path)) {
+ kref_get(&pgpath->ref_count);
m->pg_init_in_progress++;
+ }
}
@@ -1215,4 +1230,5 @@
scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
pg_init_done, &pgpath->path);
+ kref_put(&pgpath->ref_count, release_pgpath);
}