--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -138,6 +138,13 @@ static int dev_parms(struct raid_set *rs, char **argv)
         return 0;
 }
 
+static void do_table_event(struct work_struct *ws)
+{
+        struct raid_set *rs = container_of(ws, struct raid_set,
+                                           md.event_work);
+        dm_table_event(rs->ti->table);
+}
+
 /*
  * Construct a RAID4/5/6 mapping:
  * Args:
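
The new function is the usual embedded-work pattern: the workqueue hands
do_table_event() a pointer to the work_struct embedded in the raid_set's
mddev, and container_of() walks back out to the enclosing raid_set so the
right dm table can be notified. A minimal userspace sketch of that pattern
follows; the names (work_item, raid_set_demo, demo_table_event) are invented
for illustration and are not part of the patch:

#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
        void (*func)(struct work_item *ws);
};

struct raid_set_demo {
        const char *name;
        struct work_item event_work;    /* embedded, like mddev's event_work */
};

static void demo_table_event(struct work_item *ws)
{
        /* Recover the enclosing structure from the embedded member. */
        struct raid_set_demo *rs = container_of(ws, struct raid_set_demo,
                                                event_work);

        printf("table event for \"%s\"\n", rs->name);
}

int main(void)
{
        struct raid_set_demo rs = { .name = "demo" };

        rs.event_work.func = demo_table_event;  /* stands in for INIT_WORK() */
        rs.event_work.func(&rs.event_work);     /* stands in for the workqueue */
        return 0;
}

The indirection matters because dm_table_event() takes a mutex and so may
sleep; bouncing the notification through a work item lets it run in process
context regardless of where the failure was detected.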
@@ -289,6 +296,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
         if (rs->md.raid_disks - in_sync > rt->parity_devs)
                 goto err;
+        INIT_WORK(&rs->md.event_work, do_table_event);
         ti->split_io = rs->md.chunk_sectors;
         ti->private = rs;
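
Note that INIT_WORK() does double duty here: besides binding event_work to
do_table_event(), it leaves event_work.func non-NULL, and the md_error()
hunk below tests exactly that to distinguish dm-managed arrays from native
md arrays, which never initialize the field.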
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6005,6 +6005,8 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
         md_wakeup_thread(mddev->thread);
+        if (mddev->event_work.func)
+                schedule_work(&mddev->event_work);
         md_new_event_inintr(mddev);
 }
 
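
md_error() can run in atomic context (note the md_new_event_inintr() call
just after the new lines), which is why the table event is deferred through
schedule_work() rather than raised directly. The func test skips arrays that
never initialized event_work, since the mddev is zeroed at allocation and the
function pointer stays NULL. A compilable toy version of that guard, with
invented names (work_item, fire_event(), consumer()):

#include <stdio.h>

struct work_item {
        void (*func)(struct work_item *ws);
};

static void fire_event(struct work_item *w)
{
        if (w->func)            /* mirrors: if (mddev->event_work.func) */
                w->func(w);     /* mirrors: schedule_work(&mddev->event_work) */
        else
                printf("no consumer registered, event dropped\n");
}

static void consumer(struct work_item *w)
{
        printf("consumer notified\n");
}

int main(void)
{
        struct work_item native = { 0 };                 /* plain md array */
        struct work_item managed = { .func = consumer }; /* dm-raid array */

        fire_event(&native);
        fire_event(&managed);
        return 0;
}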
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -309,6 +309,7 @@ struct mddev_s
         struct bio *barrier;
         atomic_t flush_pending;
         struct work_struct barrier_work;
+        struct work_struct event_work;  /* used by dm to report failure event */
 };
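
The field lands in mddev_s rather than in dm-raid's own raid_set because
md_error() only has the mddev in hand; dm-raid maps back to its raid_set via
the container_of() in do_table_event(). Embedding it next to barrier_work
follows the pattern the struct already uses for deferred work.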