@@ -16,6 +16,7 @@ struct raid_set {
struct dm_target *ti;
struct mddev_s md;
struct raid_type *raid_type;
+ struct target_callbacks callbacks;
struct raid_dev dev[0];
};
@@ -145,6 +146,13 @@ static void do_table_event(struct work_struct *ws)
dm_table_event(rs->ti->table);
}
+static int raid_is_congested(void *v, int bits)
+{
+ struct target_callbacks *cb = v;
+ struct raid_set *rs = container_of(cb, struct raid_set,
+ callbacks);
+ return md_raid5_congested(&rs->md, bits);
+}
/*
* Construct a RAID4/5/6 mapping:
* Args:
@@ -308,6 +316,10 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (errnum)
goto err;
+
+ rs->callbacks.congested_fn = raid_is_congested;
+ dm_table_add_callbacks(ti->table, &rs->callbacks);
+
return 0;
err:
if (rs)
@@ -320,6 +332,7 @@ static void raid_dtr(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
+ list_del_init(&rs->callbacks.list);
md_stop(&rs->md);
context_free(rs);
}
@@ -68,6 +68,8 @@ struct dm_table {
void (*event_fn)(void *);
void *event_context;
+ struct list_head target_callbacks;
+
struct dm_md_mempools *mempools;
};
@@ -202,6 +204,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
return -ENOMEM;
INIT_LIST_HEAD(&t->devices);
+ INIT_LIST_HEAD(&t->target_callbacks);
atomic_set(&t->holders, 0);
if (!num_targets)
@@ -1174,10 +1177,18 @@ int dm_table_resume_targets(struct dm_table *t)
return 0;
}
+void dm_table_add_callbacks(struct dm_table *t,
+ struct target_callbacks *cb)
+{
+ list_add(&cb->list, &t->target_callbacks);
+}
+EXPORT_SYMBOL_GPL(dm_table_add_callbacks);
+
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
+ struct target_callbacks *cb;
int r = 0;
list_for_each_entry(dd, devices, list) {
@@ -1192,6 +1203,10 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
bdevname(dd->dm_dev.bdev, b));
}
+ list_for_each_entry(cb, &t->target_callbacks, list)
+ if (cb->congested_fn)
+ r |= cb->congested_fn(cb, bdi_bits);
+
return r;
}
@@ -3595,17 +3595,14 @@ static void raid5_unplug_device(struct request_queue *q)
unplug_slaves(mddev);
}
-static int raid5_congested(void *data, int bits)
+int md_raid5_congested(mddev_t *mddev, int bits)
{
- mddev_t *mddev = data;
raid5_conf_t *conf = mddev->private;
/* No difference between reads and writes. Just check
* how busy the stripe_cache is
*/
- if (mddev_congested(mddev, bits))
- return 1;
if (conf->inactive_blocked)
return 1;
if (conf->quiesce)
@@ -3615,6 +3612,15 @@ static int raid5_congested(void *data, int bits)
return 0;
}
+EXPORT_SYMBOL_GPL(md_raid5_congested);
+
+static int raid5_congested(void *data, int bits)
+{
+ mddev_t *mddev = data;
+
+ return mddev_congested(mddev, bits) ||
+ md_raid5_congested(mddev, bits);
+}
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
@@ -5106,13 +5112,14 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
+
+ mddev->queue->backing_dev_info.congested_data = mddev;
+ mddev->queue->backing_dev_info.congested_fn = raid5_congested;
}
mddev->queue->queue_lock = &conf->device_lock;
mddev->queue->unplug_fn = raid5_unplug_device;
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = raid5_congested;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
@@ -5144,7 +5151,8 @@ static int stop(mddev_t *mddev)
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
- mddev->queue->backing_dev_info.congested_fn = NULL;
+ if (mddev->queue)
+ mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
free_conf(conf);
mddev->private = NULL;
@@ -497,4 +497,5 @@ static inline int algorithm_is_DDF(int layout)
{
return layout >= 8 && layout <= 10;
}
+extern int md_raid5_congested(mddev_t *mddev, int bits);
#endif
@@ -187,6 +187,12 @@ struct dm_target {
char *error;
};
+/* Each target can link one of these into the table */
+struct target_callbacks {
+ struct list_head list;
+ congested_fn *congested_fn;
+};
+
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -263,6 +269,12 @@ int dm_table_add_target(struct dm_table *t, const char *type,
sector_t start, sector_t len, char *params);
/*
+ * A target's constructor should call this if it needs to
+ * register any callbacks
+ */
+void dm_table_add_callbacks(struct dm_table *t,
+ struct target_callbacks *cb);
+/*
* Finally call this to make the table ready for use.
*/
int dm_table_complete(struct dm_table *t);