Message ID | 20240215070300.2200308-8-hch@lst.de (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [01/17] ubd: pass queue_limits to blk_mq_alloc_disk | expand |
On Thu, Feb 15, 2024 at 8:03 AM Christoph Hellwig <hch@lst.de> wrote: > > Pass the limits rbd imposes directly to blk_mq_alloc_disk instead > of setting them one at a time. > > Signed-off-by: Christoph Hellwig <hch@lst.de> > --- > drivers/block/rbd.c | 29 +++++++++++++++-------------- > 1 file changed, 15 insertions(+), 14 deletions(-) > > diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c > index 6b4f1898a722a3..26ff5cd2bf0abc 100644 > --- a/drivers/block/rbd.c > +++ b/drivers/block/rbd.c > @@ -4952,6 +4952,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) > struct request_queue *q; > unsigned int objset_bytes = > rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; > + struct queue_limits lim = { > + .max_hw_sectors = objset_bytes >> SECTOR_SHIFT, > + .max_user_sectors = objset_bytes >> SECTOR_SHIFT, > + .io_min = rbd_dev->opts->alloc_size, > + .io_opt = rbd_dev->opts->alloc_size, > + .max_segments = USHRT_MAX, > + .max_segment_size = UINT_MAX, > + }; > int err; > > memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); > @@ -4966,7 +4974,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) > if (err) > return err; > > - disk = blk_mq_alloc_disk(&rbd_dev->tag_set, NULL, rbd_dev); > + if (rbd_dev->opts->trim) { > + lim.discard_granularity = rbd_dev->opts->alloc_size; > + lim.max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT; > + lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT; > + } > + > + disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev); > if (IS_ERR(disk)) { > err = PTR_ERR(disk); > goto out_tag_set; > @@ -4987,19 +5001,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) > blk_queue_flag_set(QUEUE_FLAG_NONROT, q); > /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ > > - blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); > - q->limits.max_sectors = queue_max_hw_sectors(q); > - blk_queue_max_segments(q, USHRT_MAX); > - blk_queue_max_segment_size(q, UINT_MAX); > - blk_queue_io_min(q, rbd_dev->opts->alloc_size); > - blk_queue_io_opt(q, rbd_dev->opts->alloc_size); > - > - if (rbd_dev->opts->trim) { > - q->limits.discard_granularity = rbd_dev->opts->alloc_size; > - blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); > - blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); > - } > - > if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) > blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); > > -- > 2.39.2 > Acked-by: Ilya Dryomov <idryomov@gmail.com> Thanks, Ilya
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6b4f1898a722a3..26ff5cd2bf0abc 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4952,6 +4952,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) struct request_queue *q; unsigned int objset_bytes = rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; + struct queue_limits lim = { + .max_hw_sectors = objset_bytes >> SECTOR_SHIFT, + .max_user_sectors = objset_bytes >> SECTOR_SHIFT, + .io_min = rbd_dev->opts->alloc_size, + .io_opt = rbd_dev->opts->alloc_size, + .max_segments = USHRT_MAX, + .max_segment_size = UINT_MAX, + }; int err; memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); @@ -4966,7 +4974,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) if (err) return err; - disk = blk_mq_alloc_disk(&rbd_dev->tag_set, NULL, rbd_dev); + if (rbd_dev->opts->trim) { + lim.discard_granularity = rbd_dev->opts->alloc_size; + lim.max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT; + lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT; + } + + disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev); if (IS_ERR(disk)) { err = PTR_ERR(disk); goto out_tag_set; @@ -4987,19 +5001,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ - blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); - q->limits.max_sectors = queue_max_hw_sectors(q); - blk_queue_max_segments(q, USHRT_MAX); - blk_queue_max_segment_size(q, UINT_MAX); - blk_queue_io_min(q, rbd_dev->opts->alloc_size); - blk_queue_io_opt(q, rbd_dev->opts->alloc_size); - - if (rbd_dev->opts->trim) { - q->limits.discard_granularity = rbd_dev->opts->alloc_size; - blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); - blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); - } - if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
Pass the limits rbd imposes directly to blk_mq_alloc_disk instead of setting them one at a time. Signed-off-by: Christoph Hellwig <hch@lst.de> --- drivers/block/rbd.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-)