@@ -107,6 +107,7 @@ static int remove_and_add_spares(struct mddev *mddev,
static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
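+/* mddev_get() is defined further down in this file; md_new_event() needs it. */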
+static inline struct mddev *mddev_get(struct mddev *mddev);
/*
* Default number of read corrections we'll attempt on an rdev
@@ -323,6 +324,24 @@ static int start_readonly;
*/
static bool create_on_open = true;
+/*
+ * Enables iteration over all existing md arrays.
+ * all_mddevs_lock protects this list.
+ */
+static LIST_HEAD(all_mddevs);
+static DEFINE_SPINLOCK(all_mddevs_lock);
+
+/*
+ * Send a KOBJ_CHANGE uevent to userspace for every new event,
+ * deferred to process context.
+ */
+static void md_kobject_uevent_fn(struct work_struct *work)
+{
+ struct mddev *mddev = container_of(work, struct mddev, uevent_work);
+
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+ /* Drop the reference taken in md_new_event(). */
+ mddev_put(mddev);
+}
+
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -335,20 +354,29 @@ static bool create_on_open = true;
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
-void md_new_event(void)
+
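+/**
+ * md_new_event() - account a new md event and notify userspace
+ * @mddev: array the event relates to
+ * @sync: emit the KOBJ_CHANGE uevent directly when true; when false,
+ * defer it to md_wq, for callers that may run in atomic context
+ */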
+void md_new_event(struct mddev *mddev, bool sync)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
+
+ /* dm-raid arrays have no gendisk to emit a uevent for. */
+ if (mddev_is_dm(mddev))
+ return;
+
+ if (sync) {
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+ return;
+ }
+
+ /* Pin the mddev so it cannot be freed while the work is pending. */
+ spin_lock(&all_mddevs_lock);
+ mddev = mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+ if (!mddev)
+ return;
+
+ /* If the work was already queued, drop the extra reference. */
+ if (!queue_work(md_wq, &mddev->uevent_work))
+ mddev_put(mddev);
}
EXPORT_SYMBOL_GPL(md_new_event);
-/*
- * Enables to iterate over all existing md arrays
- * all_mddevs_lock protects this list.
- */
-static LIST_HEAD(all_mddevs);
-static DEFINE_SPINLOCK(all_mddevs_lock);
-
static bool is_md_suspended(struct mddev *mddev)
{
return percpu_ref_is_dying(&mddev->active_io);
@@ -773,6 +801,7 @@ int mddev_init(struct mddev *mddev)
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
+ INIT_WORK(&mddev->uevent_work, md_kobject_uevent_fn);
INIT_WORK(&mddev->sync_work, md_start_sync);
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
@@ -2898,7 +2927,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_new_event();
+ md_new_event(mddev, true);
return 0;
}
@@ -3015,7 +3044,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
md_kick_rdev_from_array(rdev);
if (mddev->pers)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- md_new_event();
+ md_new_event(mddev, true);
}
}
} else if (cmd_match(buf, "writemostly")) {
@@ -4131,7 +4160,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify_dirent_safe(mddev->sysfs_level);
- md_new_event();
+ md_new_event(mddev, true);
rv = len;
out_unlock:
mddev_unlock_and_resume(mddev);
@@ -4658,7 +4687,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
export_rdev(rdev, mddev);
mddev_unlock_and_resume(mddev);
if (!err)
- md_new_event();
+ md_new_event(mddev, true);
return err ? err : len;
}
@@ -6276,7 +6305,7 @@ int md_run(struct mddev *mddev)
if (mddev->sb_flags)
md_update_sb(mddev, 0);
- md_new_event();
+ md_new_event(mddev, true);
return 0;
bitmap_abort:
@@ -6635,7 +6664,7 @@ static int do_md_stop(struct mddev *mddev, int mode)
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
- md_new_event();
+ md_new_event(mddev, true);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
@@ -7131,7 +7160,7 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (!mddev->thread)
md_update_sb(mddev, 1);
- md_new_event();
+ md_new_event(mddev, true);
return 0;
busy:
@@ -7202,7 +7231,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
* array immediately.
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_new_event();
+ md_new_event(mddev, true);
return 0;
abort_export:
@@ -8176,7 +8205,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
}
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
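+ /* md_error() may be called from interrupt context: defer the uevent. */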
- md_new_event();
+ md_new_event(mddev, false);
}
EXPORT_SYMBOL(md_error);
@@ -9073,7 +9102,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
- md_new_event();
+ md_new_event(mddev, true);
update_time = jiffies;
blk_start_plug(&plug);
@@ -9145,7 +9174,7 @@ void md_do_sync(struct md_thread *thread)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
- md_new_event();
+ md_new_event(mddev, true);
if (last_check + window > io_sectors || j == max_sectors)
continue;
@@ -9411,7 +9440,7 @@ static int remove_and_add_spares(struct mddev *mddev,
sysfs_link_rdev(mddev, rdev);
if (!test_bit(Journal, &rdev->flags))
spares++;
- md_new_event();
+ md_new_event(mddev, true);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
@@ -9530,7 +9559,7 @@ static void md_start_sync(struct work_struct *ws)
__mddev_resume(mddev, false);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event();
+ md_new_event(mddev, true);
return;
not_running:
@@ -9782,7 +9811,7 @@ void md_reap_sync_thread(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event();
+ md_new_event(mddev, true);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
wake_up(&resync_wait);
@@ -582,6 +582,7 @@ struct mddev {
*/
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
+ struct work_struct uevent_work; /* used to send a deferred uevent */
mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
@@ -883,7 +884,7 @@ extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, blk_opf_t opf, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
-extern void md_new_event(void);
+extern void md_new_event(struct mddev *mddev, bool sync);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
@@ -4542,7 +4542,7 @@ static int raid10_start_reshape(struct mddev *mddev)
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_new_event();
+ md_new_event(mddev, true);
return 0;
abort:
@@ -8525,7 +8525,7 @@ static int raid5_start_reshape(struct mddev *mddev)
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_new_event();
+ md_new_event(mddev, true);
return 0;
}