diff mbox

[2/3] xen-blkfront: introduce blkif_set_queue_limits()

Message ID 1468575109-12209-2-git-send-email-bob.liu@oracle.com (mailing list archive)
State New, archived
Headers show

Commit Message

Bob Liu July 15, 2016, 9:31 a.m. UTC
blk_mq_update_nr_hw_queues() resets all queue limits to their defaults, which
is not what xen-blkfront expects. Introduce blkif_set_queue_limits() to reset
the limits to their correct initial values.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 drivers/block/xen-blkfront.c | 91 ++++++++++++++++++++++++--------------------
 1 file changed, 50 insertions(+), 41 deletions(-)

Comments

Roger Pau Monné July 21, 2016, 8:29 a.m. UTC | #1
On Fri, Jul 15, 2016 at 05:31:48PM +0800, Bob Liu wrote:
> blk_mq_update_nr_hw_queues() reset all queue limits to default which it's not
> as xen-blkfront expected, introducing blkif_set_queue_limits() to reset limits
> with initial correct values.

Hm, great, and as usual in Linux there isn't even a comment in the function 
that explains what it is supposed to do, or what are the side-effects of 
calling blk_mq_update_nr_hw_queues.
 
> Signed-off-by: Bob Liu <bob.liu@oracle.com>
>
>  drivers/block/xen-blkfront.c | 91 ++++++++++++++++++++++++--------------------
>  1 file changed, 50 insertions(+), 41 deletions(-)
> 
> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
> index 032fc94..10f46a8 100644
> --- a/drivers/block/xen-blkfront.c
> +++ b/drivers/block/xen-blkfront.c
> @@ -189,6 +189,8 @@ struct blkfront_info
>  	struct mutex mutex;
>  	struct xenbus_device *xbdev;
>  	struct gendisk *gd;
> +	u16 sector_size;
> +	unsigned int physical_sector_size;
>  	int vdevice;
>  	blkif_vdev_t handle;
>  	enum blkif_state connected;
> @@ -913,9 +915,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
>  	.map_queue = blk_mq_map_queue,
>  };
>  
> +static void blkif_set_queue_limits(struct blkfront_info *info)
> +{
> +	struct request_queue *rq = info->rq;
> +	struct gendisk *gd = info->gd;
> +	unsigned int segments = info->max_indirect_segments ? :
> +				BLKIF_MAX_SEGMENTS_PER_REQUEST;
> +
> +	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
> +
> +	if (info->feature_discard) {
> +		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
> +		blk_queue_max_discard_sectors(rq, get_capacity(gd));
> +		rq->limits.discard_granularity = info->discard_granularity;
> +		rq->limits.discard_alignment = info->discard_alignment;
> +		if (info->feature_secdiscard)
> +			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
> +	}

AFAICT, at the point this function is called (in blkfront_resume), the 
value of info->feature_discard is still from the old backend, maybe this 
should be called from blkif_recover after blkfront_gather_backend_features?

Roger.
Bob Liu July 21, 2016, 9:44 a.m. UTC | #2
On 07/21/2016 04:29 PM, Roger Pau Monné wrote:
> On Fri, Jul 15, 2016 at 05:31:48PM +0800, Bob Liu wrote:
>> blk_mq_update_nr_hw_queues() reset all queue limits to default which it's not
>> as xen-blkfront expected, introducing blkif_set_queue_limits() to reset limits
>> with initial correct values.
> 
> Hm, great, and as usual in Linux there isn't even a comment in the function 
> that explains what it is supposed to do, or what are the side-effects of 
> calling blk_mq_update_nr_hw_queues.
>  
>> Signed-off-by: Bob Liu <bob.liu@oracle.com>
>>
>>  drivers/block/xen-blkfront.c | 91 ++++++++++++++++++++++++--------------------
>>  1 file changed, 50 insertions(+), 41 deletions(-)
>>
>> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
>> index 032fc94..10f46a8 100644
>> --- a/drivers/block/xen-blkfront.c
>> +++ b/drivers/block/xen-blkfront.c
>> @@ -189,6 +189,8 @@ struct blkfront_info
>>  	struct mutex mutex;
>>  	struct xenbus_device *xbdev;
>>  	struct gendisk *gd;
>> +	u16 sector_size;
>> +	unsigned int physical_sector_size;
>>  	int vdevice;
>>  	blkif_vdev_t handle;
>>  	enum blkif_state connected;
>> @@ -913,9 +915,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
>>  	.map_queue = blk_mq_map_queue,
>>  };
>>  
>> +static void blkif_set_queue_limits(struct blkfront_info *info)
>> +{
>> +	struct request_queue *rq = info->rq;
>> +	struct gendisk *gd = info->gd;
>> +	unsigned int segments = info->max_indirect_segments ? :
>> +				BLKIF_MAX_SEGMENTS_PER_REQUEST;
>> +
>> +	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
>> +
>> +	if (info->feature_discard) {
>> +		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
>> +		blk_queue_max_discard_sectors(rq, get_capacity(gd));
>> +		rq->limits.discard_granularity = info->discard_granularity;
>> +		rq->limits.discard_alignment = info->discard_alignment;
>> +		if (info->feature_secdiscard)
>> +			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
>> +	}
> 
> AFAICT, at the point this function is called (in blkfront_resume), the 
> value of info->feature_discard is still from the old backend, maybe this 
> should be called from blkif_recover after blkfront_gather_backend_features?
> 

Thank you for pointing that out; it will be fixed.
diff mbox

Patch

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 032fc94..10f46a8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -189,6 +189,8 @@  struct blkfront_info
 	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
+	u16 sector_size;
+	unsigned int physical_sector_size;
 	int vdevice;
 	blkif_vdev_t handle;
 	enum blkif_state connected;
@@ -913,9 +915,45 @@  static struct blk_mq_ops blkfront_mq_ops = {
 	.map_queue = blk_mq_map_queue,
 };
 
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+	struct request_queue *rq = info->rq;
+	struct gendisk *gd = info->gd;
+	unsigned int segments = info->max_indirect_segments ? :
+				BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+	if (info->feature_discard) {
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		rq->limits.discard_granularity = info->discard_granularity;
+		rq->limits.discard_alignment = info->discard_alignment;
+		if (info->feature_secdiscard)
+			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
+	}
+
+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
+	blk_queue_logical_block_size(rq, info->sector_size);
+	blk_queue_physical_block_size(rq, info->physical_sector_size);
+	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+	/* Each segment in a request is up to an aligned page in size. */
+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+	/* Ensure a merged request will fit in a single I/O ring slot. */
+	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+	/* Make sure buffer addresses are sector-aligned. */
+	blk_queue_dma_alignment(rq, 511);
+
+	/* Make sure we don't use bounce buffers. */
+	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-				unsigned int physical_sector_size,
-				unsigned int segments)
+				unsigned int physical_sector_size)
 {
 	struct request_queue *rq;
 	struct blkfront_info *info = gd->private_data;
@@ -947,37 +985,11 @@  static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	}
 
 	rq->queuedata = info;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
-	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
-		blk_queue_max_discard_sectors(rq, get_capacity(gd));
-		rq->limits.discard_granularity = info->discard_granularity;
-		rq->limits.discard_alignment = info->discard_alignment;
-		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
-	}
-
-	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_logical_block_size(rq, sector_size);
-	blk_queue_physical_block_size(rq, physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
-	/* Each segment in a request is up to an aligned page in size. */
-	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-	blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
-	/* Make sure buffer addresses are sector-aligned. */
-	blk_queue_dma_alignment(rq, 511);
-
-	/* Make sure we don't use bounce buffers. */
-	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
-	gd->queue = rq;
-
+	info->rq = gd->queue = rq;
+	info->gd = gd;
+	info->sector_size = sector_size;
+	info->physical_sector_size = physical_sector_size;
+	blkif_set_queue_limits(info);
 	return 0;
 }
 
@@ -1142,16 +1154,11 @@  static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	gd->driverfs_dev = &(info->xbdev->dev);
 	set_capacity(gd, capacity);
 
-	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
-				 info->max_indirect_segments ? :
-				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
 		del_gendisk(gd);
 		goto release;
 	}
 
-	info->rq = gd->queue;
-	info->gd = gd;
-
 	xlvbd_flush(info);
 
 	if (vdisk_info & VDISK_READONLY)
@@ -2132,9 +2139,11 @@  static int blkfront_resume(struct xenbus_device *dev)
 		return err;
 
 	err = talk_to_blkback(dev, info);
-	if (!err)
+	if (!err) {
 		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
-
+		/* Reset limits changed by blk-mq. */
+		blkif_set_queue_limits(info);
+	}
 	/*
 	 * We have to wait for the backend to switch to
 	 * connected state, since we want to read which