--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1694,7 +1694,7 @@ static void null_del_dev(struct nullb *nullb)
dev->nullb = NULL;
}
-static void null_config_discard(struct nullb *nullb)
+static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
{
if (nullb->dev->discard == false)
return;
@@ -1711,7 +1711,7 @@ static void null_config_discard(struct nullb *nullb)
return;
}
- blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
+ lim->max_hw_discard_sectors = UINT_MAX >> 9;
}
static const struct block_device_operations null_ops = {
@@ -1869,6 +1869,12 @@ static bool null_setup_fault(void)
static int null_add_dev(struct nullb_device *dev)
{
+ struct queue_limits lim = {
+ .logical_block_size = dev->blocksize,
+ .physical_block_size = dev->blocksize,
+ .max_hw_sectors = dev->max_sectors,
+ };
+
struct nullb *nullb;
int rv;
@@ -1894,7 +1900,18 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_cleanup_queues;
- nullb->disk = blk_mq_alloc_disk(nullb->tag_set, NULL, nullb);
+ if (dev->virt_boundary)
+ lim.virt_boundary_mask = PAGE_SIZE - 1;
+ if (dev->zoned) {
+ lim.zoned = true;
+ lim.chunk_sectors = dev->zone_size_sects;
+ lim.max_zone_append_sectors = dev->zone_size_sects;
+ lim.max_open_zones = dev->zone_max_open;
+ lim.max_active_zones = dev->zone_max_active;
+ }
+ null_config_discard(nullb, &lim);
+
+ nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
goto out_cleanup_tags;
@@ -1930,16 +1947,6 @@ static int null_add_dev(struct nullb_device *dev)
dev->index = rv;
mutex_unlock(&lock);
- blk_queue_logical_block_size(nullb->q, dev->blocksize);
- blk_queue_physical_block_size(nullb->q, dev->blocksize);
- if (dev->max_sectors)
- blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
-
- if (dev->virt_boundary)
- blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
-
- null_config_discard(nullb);
-
if (config_item_name(&dev->group.cg_item)) {
/* Use configfs dir name as the device name */
snprintf(nullb->disk_name, sizeof(nullb->disk_name),
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -156,18 +156,11 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
int null_register_zoned_dev(struct nullb *nullb)
{
- struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- disk_set_zoned(nullb->disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
- blk_queue_chunk_sectors(q, dev->zone_size_sects);
nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
- blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
- disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
- disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
-
return blk_revalidate_disk_zones(nullb->disk, NULL);
}
Pass the queue limits directly to blk_mq_alloc_disk instead of setting
them one at a time.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/block/null_blk/main.c  | 33 ++++++++++++++++++++-------------
 drivers/block/null_blk/zoned.c |  7 -------
 2 files changed, 20 insertions(+), 20 deletions(-)
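
For illustration, a minimal sketch of the pattern this patch adopts, outside of
null_blk: a hypothetical blk-mq driver fills in a struct queue_limits up front
and passes it to blk_mq_alloc_disk(), rather than calling the individual
blk_queue_*() setters on the already-allocated queue. Only blk_mq_alloc_disk()
and the queue_limits field names are taken from the patch above; the function
name and the literal limit values are made up for the example, and it assumes a
tree where blk_mq_alloc_disk() already takes the limits argument, as here.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/limits.h>

/*
 * Hypothetical driver setup: collect all queue limits in a stack-allocated
 * struct queue_limits and hand them to blk_mq_alloc_disk() in a single call.
 */
static int example_alloc_disk(struct blk_mq_tag_set *set, void *drv_data,
			      struct gendisk **diskp)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,
		.physical_block_size	= 4096,
		.max_hw_sectors		= 256,
		/* optional limits are just more initializers */
		.max_hw_discard_sectors	= UINT_MAX >> 9,
	};
	struct gendisk *disk;

	disk = blk_mq_alloc_disk(set, &lim, drv_data);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	*diskp = disk;
	return 0;
}

Limits left zero-initialized keep the block layer defaults, which is why the
optional limits in null_add_dev() (virt_boundary_mask, the zoned fields, and
the discard limit filled in by null_config_discard()) can simply be assigned
into the same structure before the disk is created.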