Message ID | 20250107063120.1011593-5-hch@lst.de (mailing list archive)
---|---
State | New
Series | [1/8] block: fix docs for freezing of queue limits updates
On Tue, Jan 07, 2025 at 07:30:36AM +0100, Christoph Hellwig wrote:
> De-duplicate the code for updating queue limits by adding a store_limit
> method that allows having common code handle the actual queue limits
> update.
>
> Note that this is a pure refactoring patch and does not address the
> existing freeze vs limits lock order problem in the refactored code,
> which will be addressed next.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

...

> @@ -706,11 +687,24 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
>  	if (entry->load_module)
>  		entry->load_module(disk, page, length);
>
> -	blk_mq_freeze_queue(q);
>  	mutex_lock(&q->sysfs_lock);
> -	res = entry->store(disk, page, length);
> -	mutex_unlock(&q->sysfs_lock);
> +	blk_mq_freeze_queue(q);

The order between freeze and ->sysfs_lock is changed, and it may cause a
new lockdep warning because we may freeze the queue first, before
acquiring ->sysfs_lock, in del_gendisk().

thanks,
Ming
On 1/7/25 12:55 PM, Ming Lei wrote:
> On Tue, Jan 07, 2025 at 07:30:36AM +0100, Christoph Hellwig wrote:
>> De-duplicate the code for updating queue limits by adding a store_limit
>> method that allows having common code handle the actual queue limits
>> update.
...
>> -	blk_mq_freeze_queue(q);
>>  	mutex_lock(&q->sysfs_lock);
>> -	res = entry->store(disk, page, length);
>> -	mutex_unlock(&q->sysfs_lock);
>> +	blk_mq_freeze_queue(q);
>
> The order between freeze and ->sysfs_lock is changed, and it may cause a
> new lockdep warning because we may freeze the queue first, before
> acquiring ->sysfs_lock, in del_gendisk().

On the contrary, in elevator_disable() and elevator_switch() we acquire
->sysfs_lock first, before freezing the queue. I think this is a mess and
we need to fix the ordering; we need to decide on ordering rules. IMO, the
correct order is to acquire ->sysfs_lock before freezing the queue.
Likewise, with this patch we now acquire ->limits_lock before freezing
the queue.

Thanks,
--Nilay
Hi Ming,

this is a friendly reminder to reply without quoting the entire mail.
I did not find any content after scrolling half a dozen pages of full
quote and gave up.

> > -	blk_mq_freeze_queue(q);
> >  	mutex_lock(&q->sysfs_lock);
> > -	res = entry->store(disk, page, length);
> > -	mutex_unlock(&q->sysfs_lock);
> > +	blk_mq_freeze_queue(q);
>
> The order between freeze and ->sysfs_lock is changed, and it may cause a
> new lockdep warning because we may freeze the queue first, before
> acquiring ->sysfs_lock, in del_gendisk().
>
> thanks,
> Ming

---end quoted text---
Hi Nilay,

this is a friendly reminder to reply without quoting the entire mail.
I did not find any content after scrolling half a dozen pages of full
quote and gave up.
On 1/7/25 15:30, Christoph Hellwig wrote:
> De-duplicate the code for updating queue limits by adding a store_limit
> method that allows having common code handle the actual queue limits
> update.
>
> Note that this is a pure refactoring patch and does not address the
> existing freeze vs limits lock order problem in the refactored code,
> which will be addressed next.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
On Tue, Jan 07, 2025 at 01:21:14PM +0530, Nilay Shroff wrote:
> On 1/7/25 12:55 PM, Ming Lei wrote:
>> On Tue, Jan 07, 2025 at 07:30:36AM +0100, Christoph Hellwig wrote:
>>> De-duplicate the code for updating queue limits by adding a store_limit
>>> method that allows having common code handle the actual queue limits
>>> update.
...
>> The order between freeze and ->sysfs_lock is changed, and it may cause a
>> new lockdep warning because we may freeze the queue first, before
>> acquiring ->sysfs_lock, in del_gendisk().
>
> On the contrary, in elevator_disable() and elevator_switch() we acquire
> ->sysfs_lock first, before freezing the queue. I think this is a mess and
> we need to fix the ordering; we need to decide on ordering rules. IMO, the
> correct order is to acquire ->sysfs_lock before freezing the queue.
> Likewise, with this patch we now acquire ->limits_lock before freezing
> the queue.

__blk_mq_update_nr_hw_queues() freezes the queue before acquiring
->sysfs_lock too.

So yes, it is a mess wrt. the order between ->sysfs_lock and freezing
the queue.

Thanks,
Ming
On Tue, Jan 07, 2025 at 06:25:20PM +0800, Ming Lei wrote:
> __blk_mq_update_nr_hw_queues() freezes the queue before acquiring
> ->sysfs_lock too.
>
> So yes, it is a mess wrt. the order between ->sysfs_lock and freezing
> the queue.

Let's sort out the current freeze vs limits lock issue first. The next
step is to kill sysfs_lock in its current form.
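[Editor's note] To make the inversion discussed in this subthread concrete, here is a minimal userspace sketch, not kernel code: the two pthread mutexes merely stand in for queue freezing and q->sysfs_lock (blk_mq_freeze_queue() is not literally a mutex, but lockdep tracks it like one), and the call-path names are taken from the mails above.

/*
 * Toy model of the ABBA lock-order inversion -- userspace C, compile
 * with "cc -pthread demo.c". "freeze" stands in for blk_mq_freeze_queue()
 * and "sysfs_lock" for q->sysfs_lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t freeze = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models del_gendisk(): freeze the queue, then take ->sysfs_lock. */
static void del_gendisk_path(void)
{
	pthread_mutex_lock(&freeze);
	pthread_mutex_lock(&sysfs_lock);
	puts("del_gendisk path: freeze -> sysfs_lock");
	pthread_mutex_unlock(&sysfs_lock);
	pthread_mutex_unlock(&freeze);
}

/* Models the patched queue_attr_store(): ->sysfs_lock, then freeze. */
static void attr_store_path(void)
{
	pthread_mutex_lock(&sysfs_lock);
	pthread_mutex_lock(&freeze);
	puts("attr_store path:  sysfs_lock -> freeze");
	pthread_mutex_unlock(&freeze);
	pthread_mutex_unlock(&sysfs_lock);
}

int main(void)
{
	/*
	 * Run the paths sequentially so the demo terminates. If two
	 * threads ran them concurrently, each could end up holding one
	 * lock while waiting for the other -- the deadlock lockdep warns
	 * about even when the unlucky interleaving never actually occurs.
	 */
	del_gendisk_path();
	attr_store_path();
	return 0;
}

This is exactly why lockdep complains about a mere ordering change: the warning fires on the inconsistent acquisition order itself, not on an observed hang.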
Looks good to me.
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 767598e719ab..8d69315e986d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -24,6 +24,8 @@ struct queue_sysfs_entry {
 	struct attribute attr;
 	ssize_t (*show)(struct gendisk *disk, char *page);
 	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
+	int (*store_limit)(struct gendisk *disk, const char *page,
+			size_t count, struct queue_limits *lim);
 	void (*load_module)(struct gendisk *disk, const char *page, size_t count);
 };
 
@@ -153,13 +155,11 @@ QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
 QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
 QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)
 
-static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
-		const char *page, size_t count)
+static int queue_max_discard_sectors_store(struct gendisk *disk,
+		const char *page, size_t count, struct queue_limits *lim)
 {
 	unsigned long max_discard_bytes;
-	struct queue_limits lim;
 	ssize_t ret;
-	int err;
 
 	ret = queue_var_store(&max_discard_bytes, page, count);
 	if (ret < 0)
@@ -171,38 +171,28 @@ static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
 	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
 		return -EINVAL;
 
-	lim = queue_limits_start_update(disk->queue);
-	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
-	err = queue_limits_commit_update(disk->queue, &lim);
-	if (err)
-		return err;
-	return ret;
+	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
+	return 0;
 }
 
-static ssize_t
-queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
+static int
+queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
+		struct queue_limits *lim)
 {
 	unsigned long max_sectors_kb;
-	struct queue_limits lim;
 	ssize_t ret;
-	int err;
 
 	ret = queue_var_store(&max_sectors_kb, page, count);
 	if (ret < 0)
 		return ret;
 
-	lim = queue_limits_start_update(disk->queue);
-	lim.max_user_sectors = max_sectors_kb << 1;
-	err = queue_limits_commit_update(disk->queue, &lim);
-	if (err)
-		return err;
-	return ret;
+	lim->max_user_sectors = max_sectors_kb << 1;
+	return 0;
 }
 
 static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
-		size_t count, blk_features_t feature)
+		size_t count, struct queue_limits *lim, blk_features_t feature)
 {
-	struct queue_limits lim;
 	unsigned long val;
 	ssize_t ret;
 
@@ -210,15 +200,11 @@ static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
 	if (ret < 0)
 		return ret;
 
-	lim = queue_limits_start_update(disk->queue);
 	if (val)
-		lim.features |= feature;
+		lim->features |= feature;
 	else
-		lim.features &= ~feature;
-	ret = queue_limits_commit_update(disk->queue, &lim);
-	if (ret)
-		return ret;
-	return count;
+		lim->features &= ~feature;
+	return 0;
 }
 
 #define QUEUE_SYSFS_FEATURE(_name, _feature) \
@@ -227,10 +213,10 @@ static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
 	return sysfs_emit(page, "%u\n", \
 		!!(disk->queue->limits.features & _feature)); \
 } \
-static ssize_t queue_##_name##_store(struct gendisk *disk, \
-		const char *page, size_t count) \
+static int queue_##_name##_store(struct gendisk *disk, \
+		const char *page, size_t count, struct queue_limits *lim) \
 { \
-	return queue_feature_store(disk, page, count, _feature); \
+	return queue_feature_store(disk, page, count, lim, _feature); \
 }
 
 QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
@@ -266,10 +252,9 @@ static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
 	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
 }
 
-static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
-		const char *page, size_t count)
+static int queue_iostats_passthrough_store(struct gendisk *disk,
+		const char *page, size_t count, struct queue_limits *lim)
 {
-	struct queue_limits lim;
 	unsigned long ios;
 	ssize_t ret;
 
@@ -277,18 +262,13 @@ static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
 	if (ret < 0)
 		return ret;
 
-	lim = queue_limits_start_update(disk->queue);
 	if (ios)
-		lim.flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
+		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
 	else
-		lim.flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
-
-	ret = queue_limits_commit_update(disk->queue, &lim);
-	if (ret)
-		return ret;
-
-	return count;
+		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
+	return 0;
 }
+
 static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
 {
 	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
@@ -391,12 +371,10 @@ static ssize_t queue_wc_show(struct gendisk *disk, char *page)
 	return sysfs_emit(page, "write through\n");
 }
 
-static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
-		size_t count)
+static int queue_wc_store(struct gendisk *disk, const char *page,
+		size_t count, struct queue_limits *lim)
 {
-	struct queue_limits lim;
 	bool disable;
-	int err;
 
 	if (!strncmp(page, "write back", 10)) {
 		disable = false;
@@ -407,15 +385,11 @@ static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
 		return -EINVAL;
 	}
 
-	lim = queue_limits_start_update(disk->queue);
 	if (disable)
-		lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
+		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
 	else
-		lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
-	err = queue_limits_commit_update(disk->queue, &lim);
-	if (err)
-		return err;
-	return count;
+		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
+	return 0;
 }
 
 #define QUEUE_RO_ENTRY(_prefix, _name) \
@@ -431,6 +405,13 @@ static struct queue_sysfs_entry _prefix##_entry = { \
 	.store = _prefix##_store, \
 };
 
+#define QUEUE_LIM_RW_ENTRY(_prefix, _name) \
+static struct queue_sysfs_entry _prefix##_entry = { \
+	.attr = { .name = _name, .mode = 0644 }, \
+	.show = _prefix##_show, \
+	.store_limit = _prefix##_store, \
+}
+
 #define QUEUE_RW_LOAD_MODULE_ENTRY(_prefix, _name) \
 static struct queue_sysfs_entry _prefix##_entry = { \
 	.attr = { .name = _name, .mode = 0644 }, \
@@ -441,7 +422,7 @@ static struct queue_sysfs_entry _prefix##_entry = { \
 
 QUEUE_RW_ENTRY(queue_requests, "nr_requests");
 QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
-QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
+QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
 QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
 QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
 QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
@@ -457,7 +438,7 @@ QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
 QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
 QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
 QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
-QUEUE_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
+QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
 QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
 
 QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
@@ -477,11 +458,11 @@ QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
 QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
 
 QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
-QUEUE_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
+QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
 QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
 QUEUE_RW_ENTRY(queue_poll, "io_poll");
 QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
-QUEUE_RW_ENTRY(queue_wc, "write_cache");
+QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
 QUEUE_RO_ENTRY(queue_fua, "fua");
 QUEUE_RO_ENTRY(queue_dax, "dax");
 QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
@@ -494,10 +475,10 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_logical_block_size_show,
 };
 
-QUEUE_RW_ENTRY(queue_rotational, "rotational");
-QUEUE_RW_ENTRY(queue_iostats, "iostats");
-QUEUE_RW_ENTRY(queue_add_random, "add_random");
-QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
+QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
+QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
+QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
+QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");
 
 #ifdef CONFIG_BLK_WBT
 static ssize_t queue_var_store64(s64 *var, const char *page)
@@ -695,7 +676,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	struct request_queue *q = disk->queue;
 	ssize_t res;
 
-	if (!entry->store)
+	if (!entry->store_limit && !entry->store)
 		return -EIO;
 
 	/*
@@ -706,11 +687,24 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	if (entry->load_module)
 		entry->load_module(disk, page, length);
 
-	blk_mq_freeze_queue(q);
 	mutex_lock(&q->sysfs_lock);
-	res = entry->store(disk, page, length);
-	mutex_unlock(&q->sysfs_lock);
+	blk_mq_freeze_queue(q);
+	if (entry->store_limit) {
+		struct queue_limits lim = queue_limits_start_update(q);
+
+		res = entry->store_limit(disk, page, length, &lim);
+		if (res < 0) {
+			queue_limits_cancel_update(q);
+		} else {
+			res = queue_limits_commit_update(q, &lim);
+			if (!res)
+				res = length;
+		}
+	} else {
+		res = entry->store(disk, page, length);
+	}
 	blk_mq_unfreeze_queue(q);
+	mutex_unlock(&q->sysfs_lock);
 	return res;
 }
De-duplicate the code for updating queue limits by adding a store_limit
method that allows having common code handle the actual queue limits
update.

Note that this is a pure refactoring patch and does not address the
existing freeze vs limits lock order problem in the refactored code,
which will be addressed next.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-sysfs.c | 128 ++++++++++++++++++++++------------------------
 1 file changed, 61 insertions(+), 67 deletions(-)
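[Editor's note] As a usage illustration, here is what a new limits-backed attribute would look like under this scheme, going by the QUEUE_LIM_RW_ENTRY macro and the store_limit callback introduced in the patch above. This is a sketch, not code from the series: the attribute name "foo" and the field max_user_foo_sectors are made up, and only the helpers the patch itself uses (queue_var_store, sysfs_emit) are assumed.

/*
 * Hypothetical attribute -- "foo" and lim->max_user_foo_sectors are
 * invented names.  The store helper only parses its input and mutates
 * the caller-provided limits snapshot; queue_attr_store() now owns the
 * sysfs_lock/freeze/queue_limits_start_update()/commit_update()
 * boilerplate for every such attribute.
 */
static ssize_t queue_foo_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%u\n",
			disk->queue->limits.max_user_foo_sectors);
}

static int queue_foo_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim)
{
	unsigned long foo;
	ssize_t ret;

	ret = queue_var_store(&foo, page, count);
	if (ret < 0)
		return ret;

	/* No locking and no commit here -- just update the snapshot. */
	lim->max_user_foo_sectors = foo;
	return 0;
}

QUEUE_LIM_RW_ENTRY(queue_foo, "foo");

Compared with each store handler open-coding queue_limits_start_update()/queue_limits_commit_update(), this keeps the error handling, and eventually the lock-order fix promised in the commit message, in one place.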