@@ -400,12 +400,12 @@ static int hctx_tags_show(void *data, struct seq_file *m)
struct request_queue *q = hctx->queue;
int res;
- res = mutex_lock_interruptible(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->elevator_lock);
if (res)
goto out;
if (hctx->tags)
blk_mq_debugfs_tags_show(m, hctx->tags);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
out:
return res;
@@ -417,12 +417,12 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
struct request_queue *q = hctx->queue;
int res;
- res = mutex_lock_interruptible(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->elevator_lock);
if (res)
goto out;
if (hctx->tags)
sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
out:
return res;
@@ -434,12 +434,12 @@ static int hctx_sched_tags_show(void *data, struct seq_file *m)
struct request_queue *q = hctx->queue;
int res;
- res = mutex_lock_interruptible(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->elevator_lock);
if (res)
goto out;
if (hctx->sched_tags)
blk_mq_debugfs_tags_show(m, hctx->sched_tags);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
out:
return res;
@@ -451,12 +451,12 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
struct request_queue *q = hctx->queue;
int res;
- res = mutex_lock_interruptible(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->elevator_lock);
if (res)
goto out;
if (hctx->sched_tags)
sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
out:
return res;
@@ -568,9 +568,9 @@ struct request_queue {
* nr_requests and wbt latency, this lock also protects the sysfs attrs
* nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update
* may modify hctx tags, reserved-tags and cpumask, so this lock also
- * helps protect the hctx attrs. To ensure proper locking order during
- * an elevator or nr_hw_queue update, first freeze the queue, then
- * acquire ->elevator_lock.
+ * helps protect the hctx sysfs/debugfs attrs. To ensure proper locking
+ * order during an elevator or nr_hw_queue update, first freeze the
+ * queue, then acquire ->elevator_lock.
*/
struct mutex elevator_lock;
Currently, the block debugfs attributes (tags, tags_bitmap, sched_tags,
and sched_tags_bitmap) are protected using q->sysfs_lock. However, these
attributes are updated in multiple scenarios:

- During the driver probe method
- During an elevator switch/update
- During an nr_hw_queues update
- When writing to the sysfs attribute nr_requests

All these update paths (except the driver probe method, which anyway
doesn't require any protection) are already protected using
q->elevator_lock. So to ensure consistency and proper synchronization,
replace q->sysfs_lock with q->elevator_lock for protecting these debugfs
attributes.

Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
---
 block/blk-mq-debugfs.c | 16 ++++++++--------
 include/linux/blkdev.h |  6 +++---
 2 files changed, 11 insertions(+), 11 deletions(-)
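
For reviewers, a small schematic (not part of this patch) of the locking
order the updated ->elevator_lock comment describes: freeze the queue
first, then acquire the lock. The function name below is made up for
illustration, and the exact blk_mq_freeze_queue()/blk_mq_unfreeze_queue()
signatures vary across kernel versions, so treat this as a sketch rather
than working update-path code.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Hypothetical update path, shown only to illustrate the lock ordering. */
static void example_elevator_update(struct request_queue *q)
{
	/* 1) Freeze the queue so no new requests are issued. */
	blk_mq_freeze_queue(q);

	/* 2) Only then acquire ->elevator_lock. */
	mutex_lock(&q->elevator_lock);

	/* ... elevator / nr_requests / hctx updates would happen here ... */

	mutex_unlock(&q->elevator_lock);

	/* 3) Release in the reverse order. */
	blk_mq_unfreeze_queue(q);
}

The debugfs readers changed above take only ->elevator_lock (with
mutex_lock_interruptible()), which is safe against these writers precisely
because every update path holds that same lock while modifying the tags
and sched_tags state.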