Message ID: 20190801225044.143478-2-bvanassche@acm.org (mailing list archive)
State:      New, archived
Series:     Optimize bio splitting
Looks good.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>

On 08/01/2019 03:51 PM, Bart Van Assche wrote:
> Make it clear to the compiler and also to humans that the functions
> that query request queue properties do not modify any member of the
> request_queue data structure.
>
> Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
> Cc: Christoph Hellwig <hch@infradead.org>
> Cc: Ming Lei <ming.lei@redhat.com>
> Cc: Hannes Reinecke <hare@suse.com>
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
> ---
>  block/blk-merge.c      |  7 ++++---
>  include/linux/blkdev.h | 32 ++++++++++++++++----------------
>  2 files changed, 20 insertions(+), 19 deletions(-)
> [patch body snipped; the full diff follows below]
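For readers skimming the archive, here is a minimal sketch of what the commit message means by "make it clear to the compiler". This is illustrative only and not part of the patch; request_queue_stub is a simplified stand-in for the kernel type.

struct request_queue_stub {
	unsigned long seg_boundary_mask;	/* stands in for q->limits.seg_boundary_mask */
};

/* Const-qualified getter in the style of the patch. */
static unsigned long stub_segment_boundary(const struct request_queue_stub *q)
{
	/*
	 * Any accidental write through q, e.g.
	 *	q->seg_boundary_mask = 0;
	 * would now be rejected at compile time ("assignment of member
	 * in read-only object") instead of silently mutating the queue.
	 */
	return q->seg_boundary_mask;
}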
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 57f7990b342d..8344d94f13e0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -144,7 +144,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	return sectors;
 }
 
-static unsigned get_max_segment_size(struct request_queue *q,
+static unsigned get_max_segment_size(const struct request_queue *q,
 				     unsigned offset)
 {
 	unsigned long mask = queue_segment_boundary(q);
@@ -161,8 +161,9 @@ static unsigned get_max_segment_size(struct request_queue *q,
  * Split the bvec @bv into segments, and update all kinds of
  * variables.
  */
-static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
-		unsigned *nsegs, unsigned *sectors, unsigned max_segs)
+static bool bvec_split_segs(const struct request_queue *q,
+			    const struct bio_vec *bv, unsigned *nsegs,
+			    unsigned *sectors, unsigned max_segs)
 {
 	unsigned len = bv->bv_len;
 	unsigned total_len = 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1ef375dafb1c..96a29a72fd4a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1232,42 +1232,42 @@ enum blk_default_limits {
 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
 };
 
-static inline unsigned long queue_segment_boundary(struct request_queue *q)
+static inline unsigned long queue_segment_boundary(const struct request_queue *q)
 {
 	return q->limits.seg_boundary_mask;
 }
 
-static inline unsigned long queue_virt_boundary(struct request_queue *q)
+static inline unsigned long queue_virt_boundary(const struct request_queue *q)
 {
 	return q->limits.virt_boundary_mask;
 }
 
-static inline unsigned int queue_max_sectors(struct request_queue *q)
+static inline unsigned int queue_max_sectors(const struct request_queue *q)
 {
 	return q->limits.max_sectors;
 }
 
-static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
 {
 	return q->limits.max_hw_sectors;
 }
 
-static inline unsigned short queue_max_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(const struct request_queue *q)
 {
 	return q->limits.max_segments;
 }
 
-static inline unsigned short queue_max_discard_segments(struct request_queue *q)
+static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
 {
 	return q->limits.max_discard_segments;
 }
 
-static inline unsigned int queue_max_segment_size(struct request_queue *q)
+static inline unsigned int queue_max_segment_size(const struct request_queue *q)
 {
 	return q->limits.max_segment_size;
 }
 
-static inline unsigned short queue_logical_block_size(struct request_queue *q)
+static inline unsigned short queue_logical_block_size(const struct request_queue *q)
 {
 	int retval = 512;
 
@@ -1282,7 +1282,7 @@ static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
 	return queue_logical_block_size(bdev_get_queue(bdev));
 }
 
-static inline unsigned int queue_physical_block_size(struct request_queue *q)
+static inline unsigned int queue_physical_block_size(const struct request_queue *q)
 {
 	return q->limits.physical_block_size;
 }
@@ -1292,7 +1292,7 @@ static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
 	return queue_physical_block_size(bdev_get_queue(bdev));
 }
 
-static inline unsigned int queue_io_min(struct request_queue *q)
+static inline unsigned int queue_io_min(const struct request_queue *q)
 {
 	return q->limits.io_min;
 }
@@ -1302,7 +1302,7 @@ static inline int bdev_io_min(struct block_device *bdev)
 	return queue_io_min(bdev_get_queue(bdev));
 }
 
-static inline unsigned int queue_io_opt(struct request_queue *q)
+static inline unsigned int queue_io_opt(const struct request_queue *q)
 {
 	return q->limits.io_opt;
 }
@@ -1312,7 +1312,7 @@ static inline int bdev_io_opt(struct block_device *bdev)
 	return queue_io_opt(bdev_get_queue(bdev));
 }
 
-static inline int queue_alignment_offset(struct request_queue *q)
+static inline int queue_alignment_offset(const struct request_queue *q)
 {
 	if (q->limits.misaligned)
 		return -1;
@@ -1342,7 +1342,7 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
 	return q->limits.alignment_offset;
 }
 
-static inline int queue_discard_alignment(struct request_queue *q)
+static inline int queue_discard_alignment(const struct request_queue *q)
 {
 	if (q->limits.discard_misaligned)
 		return -1;
@@ -1432,7 +1432,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
 	return 0;
 }
 
-static inline int queue_dma_alignment(struct request_queue *q)
+static inline int queue_dma_alignment(const struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
 }
@@ -1543,7 +1543,7 @@ static inline void blk_queue_max_integrity_segments(struct request_queue *q,
 }
 
 static inline unsigned short
-queue_max_integrity_segments(struct request_queue *q)
+queue_max_integrity_segments(const struct request_queue *q)
 {
 	return q->limits.max_integrity_segments;
 }
@@ -1626,7 +1626,7 @@ static inline void blk_queue_max_integrity_segments(struct request_queue *q,
 					    unsigned int segs)
 {
 }
-static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
 {
 	return 0;
 }
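A second, related benefit of this change, again as an illustrative sketch rather than the real blkdev.h code: const-qualified getters compose with const-correct callers, so a function that only reads queue limits (such as bvec_split_segs() after this patch) can itself take a const struct request_queue * without casting the qualifier away. The struct definitions below are simplified stand-ins and report_limits() is a hypothetical helper.

struct queue_limits {
	unsigned int max_sectors;
	unsigned int max_segment_size;
};

struct request_queue {
	struct queue_limits limits;
};

/* Getters in the style of the patch: they promise not to modify *q. */
static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

/*
 * Hypothetical read-only caller: because the getters accept const
 * pointers, this function can advertise (and honor) const as well,
 * with no cast needed at the call sites.
 */
static unsigned int report_limits(const struct request_queue *q)
{
	return queue_max_sectors(q) + queue_max_segment_size(q);
}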