
[PATCHv2,1/6] blk-sysfs: remove q->sysfs_lock for attributes which don't need it

Message ID 20250218082908.265283-2-nilay@linux.ibm.com (mailing list archive)
State New
Series block: fix lock order and remove redundant locking

Commit Message

Nilay Shroff Feb. 18, 2025, 8:28 a.m. UTC
There are a few sysfs attributes in the block layer which don't really
need to acquire q->sysfs_lock while being accessed. The reason is that
writing a value to such attributes is either atomic or can easily be
protected using WRITE_ONCE()/READ_ONCE(). Moreover, sysfs attributes
are inherently protected with sysfs/kernfs internal locking.

So this change segregates all existing sysfs attributes for which we
can avoid acquiring q->sysfs_lock. We group all such attributes, which
don't require any sort of locking, using the new macros
QUEUE_RO_ENTRY_NOLOCK() and QUEUE_RW_ENTRY_NOLOCK(). The newly
introduced show/store methods (show_nolock/store_nolock) are assigned
to attributes through these macros and run without holding
q->sysfs_lock.

Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
---
 block/blk-settings.c |   2 +-
 block/blk-sysfs.c    | 106 ++++++++++++++++++++++++++++++++-----------
 2 files changed, 81 insertions(+), 27 deletions(-)
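
In short, the pattern being introduced (a condensed sketch assembled from
the hunks below, not a complete excerpt): an attribute that needs no
locking is declared through one of the new *_NOLOCK macros, and
queue_attr_show()/queue_attr_store() invoke the new callbacks without
taking q->sysfs_lock.

/* Declaration: same shape as QUEUE_RW_ENTRY(), but wired to the
 * lock-free show_nolock/store_nolock callbacks. */
QUEUE_RW_ENTRY_NOLOCK(queue_poll, "io_poll");

/* Dispatch in queue_attr_show(): entries with ->show_nolock skip the lock. */
if (entry->show_nolock)
	return entry->show_nolock(disk, page);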

Comments

Christoph Hellwig Feb. 18, 2025, 8:46 a.m. UTC | #1
On Tue, Feb 18, 2025 at 01:58:54PM +0530, Nilay Shroff wrote:
> There're few sysfs attributes in block layer which don't really need
> acquiring q->sysfs_lock while accessing it. The reason being, writing
> a value to such attributes are either atomic or could be easily
> protected using WRITE_ONCE()/READ_ONCE(). Moreover, sysfs attributes
> are inherently protected with sysfs/kernfs internal locking.
> 
> So this change help segregate all existing sysfs attributes for which 
> we could avoid acquiring q->sysfs_lock. We group all such attributes,
> which don't require any sorts of locking, using macro QUEUE_RO_ENTRY_
> NOLOCK() or QUEUE_RW_ENTRY_NOLOCK(). The newly introduced show/store 
> method (show_nolock/store_nolock) is assigned to attributes using these 
> new macros. The show_nolock/store_nolock run without holding q->sysfs_
> lock.

Can you add the analysis of why they don't need sysfs_lock to this
commit message please?

With that:

Reviewed-by: Christoph Hellwig <hch@lst.de>
Nilay Shroff Feb. 18, 2025, 11:26 a.m. UTC | #2
On 2/18/25 2:16 PM, Christoph Hellwig wrote:
> On Tue, Feb 18, 2025 at 01:58:54PM +0530, Nilay Shroff wrote:
>> There're few sysfs attributes in block layer which don't really need
>> acquiring q->sysfs_lock while accessing it. The reason being, writing
>> a value to such attributes are either atomic or could be easily
>> protected using WRITE_ONCE()/READ_ONCE(). Moreover, sysfs attributes
>> are inherently protected with sysfs/kernfs internal locking.
>>
>> So this change help segregate all existing sysfs attributes for which 
>> we could avoid acquiring q->sysfs_lock. We group all such attributes,
>> which don't require any sorts of locking, using macro QUEUE_RO_ENTRY_
>> NOLOCK() or QUEUE_RW_ENTRY_NOLOCK(). The newly introduced show/store 
>> method (show_nolock/store_nolock) is assigned to attributes using these 
>> new macros. The show_nolock/store_nolock run without holding q->sysfs_
>> lock.
> 
> Can you add the analys why they don't need sysfs_lock to this commit
> message please?
Sure, will do it in the next patchset.
> 
> With that:
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>
>
Ming Lei Feb. 18, 2025, 12:10 p.m. UTC | #3
On Tue, Feb 18, 2025 at 01:58:54PM +0530, Nilay Shroff wrote:
> There're few sysfs attributes in block layer which don't really need
> acquiring q->sysfs_lock while accessing it. The reason being, writing
> a value to such attributes are either atomic or could be easily
> protected using WRITE_ONCE()/READ_ONCE(). Moreover, sysfs attributes
> are inherently protected with sysfs/kernfs internal locking.
> 
> So this change help segregate all existing sysfs attributes for which 
> we could avoid acquiring q->sysfs_lock. We group all such attributes,
> which don't require any sorts of locking, using macro QUEUE_RO_ENTRY_
> NOLOCK() or QUEUE_RW_ENTRY_NOLOCK(). The newly introduced show/store 
> method (show_nolock/store_nolock) is assigned to attributes using these 
> new macros. The show_nolock/store_nolock run without holding q->sysfs_
> lock.
> 
> Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
> ---

...

>  
> +#define QUEUE_RO_ENTRY_NOLOCK(_prefix, _name)			\
> +static struct queue_sysfs_entry _prefix##_entry = {		\
> +	.attr		= {.name = _name, .mode = 0644 },	\
> +	.show_nolock	= _prefix##_show,			\
> +}
> +
> +#define QUEUE_RW_ENTRY_NOLOCK(_prefix, _name)			\
> +static struct queue_sysfs_entry _prefix##_entry = {		\
> +	.attr		= {.name = _name, .mode = 0644 },	\
> +	.show_nolock	= _prefix##_show,			\
> +	.store_nolock	= _prefix##_store,			\
> +}
> +
>  #define QUEUE_RW_ENTRY(_prefix, _name)			\
>  static struct queue_sysfs_entry _prefix##_entry = {	\
>  	.attr	= { .name = _name, .mode = 0644 },	\
> @@ -446,7 +470,7 @@ QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
>  QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
>  QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
>  QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
> -QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
> +QUEUE_RO_ENTRY_NOLOCK(queue_discard_zeroes_data, "discard_zeroes_data");

I think all QUEUE_RO_ENTRY needn't sysfs_lock, why do you just convert
part of them?


Thanks,
Ming
Nilay Shroff Feb. 18, 2025, 1:11 p.m. UTC | #4
On 2/18/25 5:40 PM, Ming Lei wrote:
> On Tue, Feb 18, 2025 at 01:58:54PM +0530, Nilay Shroff wrote:
>> There're few sysfs attributes in block layer which don't really need
>> acquiring q->sysfs_lock while accessing it. The reason being, writing
>> a value to such attributes are either atomic or could be easily
>> protected using WRITE_ONCE()/READ_ONCE(). Moreover, sysfs attributes
>> are inherently protected with sysfs/kernfs internal locking.
>>
>> So this change help segregate all existing sysfs attributes for which 
>> we could avoid acquiring q->sysfs_lock. We group all such attributes,
>> which don't require any sorts of locking, using macro QUEUE_RO_ENTRY_
>> NOLOCK() or QUEUE_RW_ENTRY_NOLOCK(). The newly introduced show/store 
>> method (show_nolock/store_nolock) is assigned to attributes using these 
>> new macros. The show_nolock/store_nolock run without holding q->sysfs_
>> lock.
>>
>> Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
>> ---
> 
> ...
> 
>>  
>> +#define QUEUE_RO_ENTRY_NOLOCK(_prefix, _name)			\
>> +static struct queue_sysfs_entry _prefix##_entry = {		\
>> +	.attr		= {.name = _name, .mode = 0644 },	\
>> +	.show_nolock	= _prefix##_show,			\
>> +}
>> +
>> +#define QUEUE_RW_ENTRY_NOLOCK(_prefix, _name)			\
>> +static struct queue_sysfs_entry _prefix##_entry = {		\
>> +	.attr		= {.name = _name, .mode = 0644 },	\
>> +	.show_nolock	= _prefix##_show,			\
>> +	.store_nolock	= _prefix##_store,			\
>> +}
>> +
>>  #define QUEUE_RW_ENTRY(_prefix, _name)			\
>>  static struct queue_sysfs_entry _prefix##_entry = {	\
>>  	.attr	= { .name = _name, .mode = 0644 },	\
>> @@ -446,7 +470,7 @@ QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
>>  QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
>>  QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
>>  QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
>> -QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
>> +QUEUE_RO_ENTRY_NOLOCK(queue_discard_zeroes_data, "discard_zeroes_data");
> 
> I think all QUEUE_RO_ENTRY needn't sysfs_lock, why do you just convert
> part of them?
> 
I think we have a few read-only attributes which still need protection
using q->limits_lock. If you refer to the 2nd patch in the series you'll
find them. In this patch we only group the attributes which don't
require any locking, under show_nolock/store_nolock.

Thanks,
--Nilay
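
For illustration only, a rough sketch (not code from this series) of what
a read-only helper kept under q->limits_lock could look like once the 2nd
patch moves such attributes under that lock; the helper name and the
max_sectors field are used purely as examples:

static ssize_t queue_example_limit_show(struct gendisk *disk, char *page)
{
	unsigned int val;

	/*
	 * Read the limit under q->limits_lock so that a concurrent
	 * queue_limits_commit_update() cannot be observed half-way
	 * through updating the limits.
	 */
	mutex_lock(&disk->queue->limits_lock);
	val = disk->queue->limits.max_sectors;
	mutex_unlock(&disk->queue->limits_lock);

	return sysfs_emit(page, "%u\n", val);
}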
Ming Lei Feb. 18, 2025, 1:45 p.m. UTC | #5
On Tue, Feb 18, 2025 at 06:41:06PM +0530, Nilay Shroff wrote:
> 
> 
> On 2/18/25 5:40 PM, Ming Lei wrote:
> > On Tue, Feb 18, 2025 at 01:58:54PM +0530, Nilay Shroff wrote:
> >> There're few sysfs attributes in block layer which don't really need
> >> acquiring q->sysfs_lock while accessing it. The reason being, writing
> >> a value to such attributes are either atomic or could be easily
> >> protected using WRITE_ONCE()/READ_ONCE(). Moreover, sysfs attributes
> >> are inherently protected with sysfs/kernfs internal locking.
> >>
> >> So this change help segregate all existing sysfs attributes for which 
> >> we could avoid acquiring q->sysfs_lock. We group all such attributes,
> >> which don't require any sorts of locking, using macro QUEUE_RO_ENTRY_
> >> NOLOCK() or QUEUE_RW_ENTRY_NOLOCK(). The newly introduced show/store 
> >> method (show_nolock/store_nolock) is assigned to attributes using these 
> >> new macros. The show_nolock/store_nolock run without holding q->sysfs_
> >> lock.
> >>
> >> Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
> >> ---
> > 
> > ...
> > 
> >>  
> >> +#define QUEUE_RO_ENTRY_NOLOCK(_prefix, _name)			\
> >> +static struct queue_sysfs_entry _prefix##_entry = {		\
> >> +	.attr		= {.name = _name, .mode = 0644 },	\
> >> +	.show_nolock	= _prefix##_show,			\
> >> +}
> >> +
> >> +#define QUEUE_RW_ENTRY_NOLOCK(_prefix, _name)			\
> >> +static struct queue_sysfs_entry _prefix##_entry = {		\
> >> +	.attr		= {.name = _name, .mode = 0644 },	\
> >> +	.show_nolock	= _prefix##_show,			\
> >> +	.store_nolock	= _prefix##_store,			\
> >> +}
> >> +
> >>  #define QUEUE_RW_ENTRY(_prefix, _name)			\
> >>  static struct queue_sysfs_entry _prefix##_entry = {	\
> >>  	.attr	= { .name = _name, .mode = 0644 },	\
> >> @@ -446,7 +470,7 @@ QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
> >>  QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
> >>  QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
> >>  QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
> >> -QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
> >> +QUEUE_RO_ENTRY_NOLOCK(queue_discard_zeroes_data, "discard_zeroes_data");
> > 
> > I think all QUEUE_RO_ENTRY needn't sysfs_lock, why do you just convert
> > part of them?
> > 
> I think we have few read-only attributes which still need protection
> using q->limits_lock. So if you refer 2nd patch in the series then you'd
> find it. In this patch we group only attributes which don't require any
> locking and grouped them under show_nolock/store_nolock.

IMO, these RO attributes don't need protection from q->limits_lock:

- no lifetime issue

- in-tree code doesn't need limits_lock.

- all are scalar variables, so the attribute itself is updated atomically

- the limits may still be updated after the lock is released

So what do you want to protect on these RO attributes? My concern is
that it is too complicated to maintain multiple versions of such macros.


Thanks,
Ming
Christoph Hellwig Feb. 18, 2025, 4:29 p.m. UTC | #6
On Tue, Feb 18, 2025 at 09:45:02PM +0800, Ming Lei wrote:
> IMO, this RO attributes needn't protection from q->limits_lock:
> 
> - no lifetime issue
> 
> - in-tree code needn't limits_lock.
> 
> - all are scalar variable, so the attribute itself is updated atomically

Except in the memory model they aren't without READ_ONCE/WRITE_ONCE.

Given that the limits_lock is not a hot lock, taking the lock is a very
easy way to mark our intent.  And if we get things like the thread
sanitizer patches merged that will become essential.  Even KCSAN
might object already without it.
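
As a concrete example of the pairing in question, taken from this patch:
the rq_timeout update and its sysfs read may run concurrently, and marking
both sides keeps the access well-defined under the kernel memory model and
visibly intentional to tools like KCSAN:

/* Writer (blk-settings.c): a single, marked store. */
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	WRITE_ONCE(q->rq_timeout, timeout);
}

/* Reader (blk-sysfs.c): may race with the store above; READ_ONCE() marks it. */
static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%u\n",
			jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
}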
Ming Lei Feb. 19, 2025, 3:24 a.m. UTC | #7
On Tue, Feb 18, 2025 at 05:29:53PM +0100, Christoph Hellwig wrote:
> On Tue, Feb 18, 2025 at 09:45:02PM +0800, Ming Lei wrote:
> > IMO, this RO attributes needn't protection from q->limits_lock:
> > 
> > - no lifetime issue
> > 
> > - in-tree code needn't limits_lock.
> > 
> > - all are scalar variable, so the attribute itself is updated atomically
> 
> Except in the memory model they aren't without READ_ONCE/WRITE_ONCE.

READ_ONCE()/WRITE_ONCE() are meant for avoiding compiler optimizations,
and atomic update of a scalar variable should be decided by the hardware.

> 
> Given that the limits_lock is not a hot lock taking the lock is a very
> easy way to mark our intent.  And if we get things like thread thread
> sanitizer patches merged that will become essential.  Even KCSAN
> might object already without it.

My main concern is that there are too many ->store()/->load() variants
now, but it's not a big deal if you think this way is fine, :-)

Thanks,
Ming
Christoph Hellwig Feb. 19, 2025, 5:42 a.m. UTC | #8
On Wed, Feb 19, 2025 at 11:24:43AM +0800, Ming Lei wrote:
> On Tue, Feb 18, 2025 at 05:29:53PM +0100, Christoph Hellwig wrote:
> > On Tue, Feb 18, 2025 at 09:45:02PM +0800, Ming Lei wrote:
> > > IMO, this RO attributes needn't protection from q->limits_lock:
> > > 
> > > - no lifetime issue
> > > 
> > > - in-tree code needn't limits_lock.
> > > 
> > > - all are scalar variable, so the attribute itself is updated atomically
> > 
> > Except in the memory model they aren't without READ_ONCE/WRITE_ONCE.
> 
> RW_ONCE is supposed for avoiding compiler optimization, and scalar
> variable atomic update should be decided by hardware.

Nothing forces the compiler to update atomically without that.  Yes,
it is very unlikely to get things wrong, but I'd rather be explicit,
and make sure static and dynamic analysis knows what we are doing here.
Nilay Shroff Feb. 19, 2025, 8:34 a.m. UTC | #9
On 2/19/25 8:54 AM, Ming Lei wrote:
> On Tue, Feb 18, 2025 at 05:29:53PM +0100, Christoph Hellwig wrote:
>> On Tue, Feb 18, 2025 at 09:45:02PM +0800, Ming Lei wrote:
>>> IMO, this RO attributes needn't protection from q->limits_lock:
>>>
>>> - no lifetime issue
>>>
>>> - in-tree code needn't limits_lock.
>>>
>>> - all are scalar variable, so the attribute itself is updated atomically
>>
>> Except in the memory model they aren't without READ_ONCE/WRITE_ONCE.
> 
> RW_ONCE is supposed for avoiding compiler optimization, and scalar
> variable atomic update should be decided by hardware.
> 
>>
>> Given that the limits_lock is not a hot lock taking the lock is a very
>> easy way to mark our intent.  And if we get things like thread thread
>> sanitizer patches merged that will become essential.  Even KCSAN
>> might object already without it.
> 
> My main concern is that there are too many ->store()/->load() variants
> now, but not deal if you think this way is fine, :-)
> 
We will only have ->store_limit()/->show_limit() and ->store()/->load() in
the next patchset, as I am going to clean up load_module() as well as do
away with the show_nolock() and store_nolock() methods, as discussed with
Christoph in another thread.

Thanks,
--Nilay
Nilay Shroff Feb. 19, 2025, 8:56 a.m. UTC | #10
On 2/19/25 2:04 PM, Nilay Shroff wrote:
> 
> 
> On 2/19/25 8:54 AM, Ming Lei wrote:
>> On Tue, Feb 18, 2025 at 05:29:53PM +0100, Christoph Hellwig wrote:
>>> On Tue, Feb 18, 2025 at 09:45:02PM +0800, Ming Lei wrote:
>>>> IMO, this RO attributes needn't protection from q->limits_lock:
>>>>
>>>> - no lifetime issue
>>>>
>>>> - in-tree code needn't limits_lock.
>>>>
>>>> - all are scalar variable, so the attribute itself is updated atomically
>>>
>>> Except in the memory model they aren't without READ_ONCE/WRITE_ONCE.
>>
>> RW_ONCE is supposed for avoiding compiler optimization, and scalar
>> variable atomic update should be decided by hardware.
>>
>>>
>>> Given that the limits_lock is not a hot lock taking the lock is a very
>>> easy way to mark our intent.  And if we get things like thread thread
>>> sanitizer patches merged that will become essential.  Even KCSAN
>>> might object already without it.
>>
>> My main concern is that there are too many ->store()/->load() variants
>> now, but not deal if you think this way is fine, :-)
>>
> We will only have ->store_limit()/->show_limit() and ->store()/->load() in
> the next patchset as I am going to cleanup load_module() as well as get away with show_nolock() and store_nolock() methods as discussed with Christoph in 
> another thread.
> 

Sorry, a typo: I meant we will only have ->store_limit()/->show_limit()
and ->store()/->show() methods. Also, we'll clean up load_module() as
well as do away with the show_nolock() and store_nolock() methods in the
next patchset, as discussed with Christoph in another thread.

Thanks,
--Nilay
Ming Lei Feb. 19, 2025, 9:20 a.m. UTC | #11
On Wed, Feb 19, 2025 at 02:26:49PM +0530, Nilay Shroff wrote:
> 
> 
> On 2/19/25 2:04 PM, Nilay Shroff wrote:
> > 
> > 
> > On 2/19/25 8:54 AM, Ming Lei wrote:
> >> On Tue, Feb 18, 2025 at 05:29:53PM +0100, Christoph Hellwig wrote:
> >>> On Tue, Feb 18, 2025 at 09:45:02PM +0800, Ming Lei wrote:
> >>>> IMO, this RO attributes needn't protection from q->limits_lock:
> >>>>
> >>>> - no lifetime issue
> >>>>
> >>>> - in-tree code needn't limits_lock.
> >>>>
> >>>> - all are scalar variable, so the attribute itself is updated atomically
> >>>
> >>> Except in the memory model they aren't without READ_ONCE/WRITE_ONCE.
> >>
> >> RW_ONCE is supposed for avoiding compiler optimization, and scalar
> >> variable atomic update should be decided by hardware.
> >>
> >>>
> >>> Given that the limits_lock is not a hot lock taking the lock is a very
> >>> easy way to mark our intent.  And if we get things like thread thread
> >>> sanitizer patches merged that will become essential.  Even KCSAN
> >>> might object already without it.
> >>
> >> My main concern is that there are too many ->store()/->load() variants
> >> now, but not deal if you think this way is fine, :-)
> >>
> > We will only have ->store_limit()/->show_limit() and ->store()/->load() in
> > the next patchset as I am going to cleanup load_module() as well as get away with show_nolock() and store_nolock() methods as discussed with Christoph in 
> > another thread.
> > 
> 
> Sorry a typo, I meant we will only have ->store_limit()/->show_limit()
> and ->store()/show() methods. Also, we'll cleanup load_module() as well
> as get away with show_nolock() and store_nolock() methods in the next 
> patchset as discussed with Christoph in another thread.

OK, that looks much better!


Thanks,
Ming

Patch

diff --git a/block/blk-settings.c b/block/blk-settings.c
index c44dadc35e1e..c541bf22f543 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -21,7 +21,7 @@ 
 
 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 {
-	q->rq_timeout = timeout;
+	WRITE_ONCE(q->rq_timeout, timeout);
 }
 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6f548a4376aa..0c9be7c7ecc1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -23,9 +23,14 @@ 
 struct queue_sysfs_entry {
 	struct attribute attr;
 	ssize_t (*show)(struct gendisk *disk, char *page);
+	ssize_t (*show_nolock)(struct gendisk *disk, char *page);
+
 	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
+	ssize_t (*store_nolock)(struct gendisk *disk,
+			const char *page, size_t count);
 	int (*store_limit)(struct gendisk *disk, const char *page,
 			size_t count, struct queue_limits *lim);
+
 	void (*load_module)(struct gendisk *disk, const char *page, size_t count);
 };
 
@@ -320,7 +325,12 @@  queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
 	ret = queue_var_store(&val, page, count);
 	if (ret < 0)
 		return ret;
-
+	/*
+	 * Here we update two queue flags, each using atomic bitops. Although
+	 * updating the two flags together isn't atomic, it should be harmless
+	 * as those flags are accessed individually using the atomic test_bit()
+	 * operation. So we don't grab any lock while updating these flags.
+	 */
 	if (val == 2) {
 		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
 		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
@@ -353,7 +363,8 @@  static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
 
 static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
 {
-	return sysfs_emit(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
+	return sysfs_emit(page, "%u\n",
+			jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
 }
 
 static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
@@ -405,6 +416,19 @@  static struct queue_sysfs_entry _prefix##_entry = {	\
 	.show	= _prefix##_show,			\
 };
 
+#define QUEUE_RO_ENTRY_NOLOCK(_prefix, _name)			\
+static struct queue_sysfs_entry _prefix##_entry = {		\
+	.attr		= {.name = _name, .mode = 0644 },	\
+	.show_nolock	= _prefix##_show,			\
+}
+
+#define QUEUE_RW_ENTRY_NOLOCK(_prefix, _name)			\
+static struct queue_sysfs_entry _prefix##_entry = {		\
+	.attr		= {.name = _name, .mode = 0644 },	\
+	.show_nolock	= _prefix##_show,			\
+	.store_nolock	= _prefix##_store,			\
+}
+
 #define QUEUE_RW_ENTRY(_prefix, _name)			\
 static struct queue_sysfs_entry _prefix##_entry = {	\
 	.attr	= { .name = _name, .mode = 0644 },	\
@@ -446,7 +470,7 @@  QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
 QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
 QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
 QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
-QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
+QUEUE_RO_ENTRY_NOLOCK(queue_discard_zeroes_data, "discard_zeroes_data");
 
 QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
 QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
@@ -454,25 +478,25 @@  QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
 QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
 QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
 
-QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
+QUEUE_RO_ENTRY_NOLOCK(queue_write_same_max, "write_same_max_bytes");
 QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
 QUEUE_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
 QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
 
 QUEUE_RO_ENTRY(queue_zoned, "zoned");
-QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
+QUEUE_RO_ENTRY_NOLOCK(queue_nr_zones, "nr_zones");
 QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
 QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
 
-QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
+QUEUE_RW_ENTRY_NOLOCK(queue_nomerges, "nomerges");
 QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
-QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
-QUEUE_RW_ENTRY(queue_poll, "io_poll");
-QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
+QUEUE_RW_ENTRY_NOLOCK(queue_rq_affinity, "rq_affinity");
+QUEUE_RW_ENTRY_NOLOCK(queue_poll, "io_poll");
+QUEUE_RW_ENTRY_NOLOCK(queue_poll_delay, "io_poll_delay");
 QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
 QUEUE_RO_ENTRY(queue_fua, "fua");
 QUEUE_RO_ENTRY(queue_dax, "dax");
-QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
+QUEUE_RW_ENTRY_NOLOCK(queue_io_timeout, "io_timeout");
 QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
 QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
 
@@ -561,9 +585,11 @@  QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
 
 /* Common attributes for bio-based and request-based queues. */
 static struct attribute *queue_attrs[] = {
+	/*
+	 * attributes protected with q->sysfs_lock
+	 */
 	&queue_ra_entry.attr,
 	&queue_max_hw_sectors_entry.attr,
-	&queue_max_sectors_entry.attr,
 	&queue_max_segments_entry.attr,
 	&queue_max_discard_segments_entry.attr,
 	&queue_max_integrity_segments_entry.attr,
@@ -575,46 +601,63 @@  static struct attribute *queue_attrs[] = {
 	&queue_io_min_entry.attr,
 	&queue_io_opt_entry.attr,
 	&queue_discard_granularity_entry.attr,
-	&queue_max_discard_sectors_entry.attr,
 	&queue_max_hw_discard_sectors_entry.attr,
-	&queue_discard_zeroes_data_entry.attr,
 	&queue_atomic_write_max_sectors_entry.attr,
 	&queue_atomic_write_boundary_sectors_entry.attr,
 	&queue_atomic_write_unit_min_entry.attr,
 	&queue_atomic_write_unit_max_entry.attr,
-	&queue_write_same_max_entry.attr,
 	&queue_max_write_zeroes_sectors_entry.attr,
 	&queue_max_zone_append_sectors_entry.attr,
 	&queue_zone_write_granularity_entry.attr,
-	&queue_rotational_entry.attr,
 	&queue_zoned_entry.attr,
-	&queue_nr_zones_entry.attr,
 	&queue_max_open_zones_entry.attr,
 	&queue_max_active_zones_entry.attr,
-	&queue_nomerges_entry.attr,
+	&queue_fua_entry.attr,
+	&queue_dax_entry.attr,
+	&queue_virt_boundary_mask_entry.attr,
+	&queue_dma_alignment_entry.attr,
+
+	/*
+	 * attributes protected with q->limits_lock
+	 */
+	&queue_max_sectors_entry.attr,
+	&queue_max_discard_sectors_entry.attr,
+	&queue_rotational_entry.attr,
 	&queue_iostats_passthrough_entry.attr,
 	&queue_iostats_entry.attr,
 	&queue_stable_writes_entry.attr,
 	&queue_add_random_entry.attr,
-	&queue_poll_entry.attr,
 	&queue_wc_entry.attr,
-	&queue_fua_entry.attr,
-	&queue_dax_entry.attr,
+
+	/*
+	 * attributes which don't require locking
+	 */
+	&queue_nomerges_entry.attr,
+	&queue_poll_entry.attr,
 	&queue_poll_delay_entry.attr,
-	&queue_virt_boundary_mask_entry.attr,
-	&queue_dma_alignment_entry.attr,
+	&queue_discard_zeroes_data_entry.attr,
+	&queue_write_same_max_entry.attr,
+	&queue_nr_zones_entry.attr,
+
 	NULL,
 };
 
 /* Request-based queue attributes that are not relevant for bio-based queues. */
 static struct attribute *blk_mq_queue_attrs[] = {
+	/*
+	 * attributes protected with q->sysfs_lock
+	 */
 	&queue_requests_entry.attr,
 	&elv_iosched_entry.attr,
-	&queue_rq_affinity_entry.attr,
-	&queue_io_timeout_entry.attr,
 #ifdef CONFIG_BLK_WBT
 	&queue_wb_lat_entry.attr,
 #endif
+	/*
+	 * attributes which don't require locking
+	 */
+	&queue_rq_affinity_entry.attr,
+	&queue_io_timeout_entry.attr,
+
 	NULL,
 };
 
@@ -666,8 +709,12 @@  queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
 	ssize_t res;
 
-	if (!entry->show)
+	if (!entry->show && !entry->show_nolock)
 		return -EIO;
+
+	if (entry->show_nolock)
+		return entry->show_nolock(disk, page);
+
 	mutex_lock(&disk->queue->sysfs_lock);
 	res = entry->show(disk, page);
 	mutex_unlock(&disk->queue->sysfs_lock);
@@ -684,7 +731,7 @@  queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	unsigned int memflags;
 	ssize_t res;
 
-	if (!entry->store_limit && !entry->store)
+	if (!entry->store_limit && !entry->store_nolock && !entry->store)
 		return -EIO;
 
 	/*
@@ -695,6 +742,13 @@  queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	if (entry->load_module)
 		entry->load_module(disk, page, length);
 
+	if (entry->store_nolock) {
+		memflags = blk_mq_freeze_queue(q);
+		res = entry->store_nolock(disk, page, length);
+		blk_mq_unfreeze_queue(q, memflags);
+		return res;
+	}
+
 	if (entry->store_limit) {
 		struct queue_limits lim = queue_limits_start_update(q);