--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1769,6 +1769,7 @@ static struct elv_fs_entry as_attrs[] = {
ELV_ATTR(fairness),
ELV_ATTR(slice_sync),
ELV_ATTR(group_idle),
+ ELV_ATTR(map_sync),
#endif
__ATTR_NULL
};
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -466,6 +466,7 @@ static struct elv_fs_entry deadline_attrs[] = {
ELV_ATTR(fairness),
ELV_ATTR(slice_sync),
ELV_ATTR(group_idle),
+ ELV_ATTR(map_sync),
#endif
__ATTR_NULL
};
--- a/block/elevator-fq.c
+++ b/block/elevator-fq.c
@@ -681,6 +681,8 @@ SHOW_FUNCTION(elv_slice_async_show, efqd->elv_slice[0], 1);
EXPORT_SYMBOL(elv_slice_async_show);
SHOW_FUNCTION(elv_fairness_show, efqd->fairness, 0);
EXPORT_SYMBOL(elv_fairness_show);
+SHOW_FUNCTION(elv_map_sync_show, efqd->map_sync, 0);
+EXPORT_SYMBOL(elv_map_sync_show);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -707,6 +709,8 @@ STORE_FUNCTION(elv_slice_async_store, &efqd->elv_slice[0], 1, UINT_MAX, 1);
EXPORT_SYMBOL(elv_slice_async_store);
STORE_FUNCTION(elv_fairness_store, &efqd->fairness, 0, 1, 0);
EXPORT_SYMBOL(elv_fairness_store);
+STORE_FUNCTION(elv_map_sync_store, &efqd->map_sync, 0, 1, 0);
+EXPORT_SYMBOL(elv_map_sync_store);
#undef STORE_FUNCTION
void elv_schedule_dispatch(struct request_queue *q)
@@ -1713,6 +1717,7 @@ struct io_group *elv_io_get_io_group_bio(struct request_queue *q,
struct bio *bio, int create)
{
struct page *page = NULL;
+ struct elv_fq_data *efqd = q->elevator->efqd;
/*
* Determine the group from task context. Even calls from
@@ -1731,7 +1736,7 @@ struct io_group *elv_io_get_io_group_bio(struct request_queue *q,
}
/* Map the sync bio to the right group using task context */
- if (elv_bio_sync(bio))
+ if (elv_bio_sync(bio) && !efqd->map_sync)
goto sync;
#ifdef CONFIG_TRACK_ASYNC_CONTEXT
--- a/block/elevator-fq.h
+++ b/block/elevator-fq.h
@@ -205,6 +205,12 @@ struct elv_fq_data {
* queue before new queue is scheduled in
*/
unsigned int fairness;
+
+ /*
+ * Map a bio to its io group using bio tracking information from the bio
+ * and not from the submitting task's context
+ */
+ unsigned int map_sync;
};
/* Logging facilities. */
@@ -436,6 +442,9 @@ extern struct io_group *elv_io_get_io_group_bio(struct request_queue *q,
extern ssize_t elv_group_idle_show(struct elevator_queue *q, char *name);
extern ssize_t elv_group_idle_store(struct elevator_queue *q, const char *name,
size_t count);
+extern ssize_t elv_map_sync_show(struct elevator_queue *q, char *name);
+extern ssize_t elv_map_sync_store(struct elevator_queue *q, const char *name,
+ size_t count);
extern void elv_io_group_congestion_threshold(struct request_queue *q,
struct io_group *iog);
static inline void elv_get_iog(struct io_group *iog)
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -88,6 +88,7 @@ static struct elv_fs_entry noop_attrs[] = {
ELV_ATTR(fairness),
ELV_ATTR(slice_sync),
ELV_ATTR(group_idle),
+ ELV_ATTR(map_sync),
__ATTR_NULL
};
#endif
o Sync requests are mapped to the cgroup the submitting task belongs to. This
  is an experimental patch where sync requests can instead be mapped to the
  group using bio tracking information and not the submitting task's context.

o This patch implements this functionality only for noop, deadline and AS.

o Introducing this patch for experimental purposes to see if it is really
  meaningful or not.

o This behavior is turned on by setting the
  /sys/block/<disk>/queue/iosched/map_sync variable.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
---
 block/as-iosched.c       |    1 +
 block/deadline-iosched.c |    1 +
 block/elevator-fq.c      |    7 ++++++-
 block/elevator-fq.h      |    9 +++++++++
 block/noop-iosched.c     |    1 +
 5 files changed, 18 insertions(+), 1 deletions(-)
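For reference, here is a minimal stand-alone user-space sketch (not part of the
patch) of the mapping decision the new tunable controls. The struct and
function names (fake_efqd, use_task_context) are hypothetical stand-ins for
the kernel state; only the condition mirrors the elv_bio_sync()/map_sync check
added to elv_io_get_io_group_bio() above.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant part of struct elv_fq_data. */
struct fake_efqd {
	unsigned int map_sync;	/* 0 = map sync bios by task context, 1 = by bio tracking */
};

/* True when a bio should be mapped from the submitting task's context. */
static bool use_task_context(const struct fake_efqd *efqd, bool bio_is_sync)
{
	/* Mirrors: if (elv_bio_sync(bio) && !efqd->map_sync) goto sync; */
	return bio_is_sync && !efqd->map_sync;
}

int main(void)
{
	struct fake_efqd off = { .map_sync = 0 };
	struct fake_efqd on  = { .map_sync = 1 };

	/* Default behaviour: a sync bio follows the submitting task's cgroup. */
	printf("map_sync=0, sync bio -> task context: %d\n",
	       use_task_context(&off, true));

	/* map_sync set: a sync bio falls through to bio tracking, like async. */
	printf("map_sync=1, sync bio -> task context: %d\n",
	       use_task_context(&on, true));

	return 0;
}

With the attribute registered as above, the tunable itself would be flipped
through /sys/block/<disk>/queue/iosched/map_sync; writing 1 makes sync
requests follow bio tracking information instead of the submitting task's
cgroup.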