Message ID | 20250107182516.48723-1-andrew.boyer@amd.com (mailing list archive)
---|---
State | New
Series | virtio_blk: always post notifications under the lock
On Wed, Jan 8, 2025 at 2:27 AM Andrew Boyer <andrew.boyer@amd.com> wrote:
>
> Commit af8ececda185 ("virtio: add VIRTIO_F_NOTIFICATION_DATA feature
> support") added notification data support to the core virtio driver
> code. When this feature is enabled, the notification includes the
> updated producer index for the queue. Thus it is now critical that
> notifications arrive in order.
>
> The virtio_blk driver has historically not worried about notification
> ordering. Modify it so that the prepare and kick steps are both done
> under the vq lock.

Do we have performance numbers when VIRTIO_F_NOTIFICATION_DATA is not
negotiated? We need to make sure it doesn't introduce any regression in
cases like a virtualization setup, since there could now be a vmexit
while holding the virtqueue lock.

Thanks

>
> Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
> Reviewed-by: Brett Creeley <brett.creeley@amd.com>
> Fixes: af8ececda185 ("virtio: add VIRTIO_F_NOTIFICATION_DATA feature support")
> Cc: Viktor Prutyanov <viktor@daynix.com>
> Cc: virtualization@lists.linux.dev
> Cc: linux-block@vger.kernel.org
> ---
>  drivers/block/virtio_blk.c | 19 ++++---------------
>  1 file changed, 4 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> index 3efe378f1386..14d9e66bb844 100644
> --- a/drivers/block/virtio_blk.c
> +++ b/drivers/block/virtio_blk.c
> @@ -379,14 +379,10 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
>  {
>          struct virtio_blk *vblk = hctx->queue->queuedata;
>          struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
> -        bool kick;
>
>          spin_lock_irq(&vq->lock);
> -        kick = virtqueue_kick_prepare(vq->vq);
> +        virtqueue_kick(vq->vq);
>          spin_unlock_irq(&vq->lock);
> -
> -        if (kick)
> -                virtqueue_notify(vq->vq);
>  }
>
>  static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
> @@ -432,7 +428,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>          struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
>          unsigned long flags;
>          int qid = hctx->queue_num;
> -        bool notify = false;
>          blk_status_t status;
>          int err;
>
> @@ -454,12 +449,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>                  return virtblk_fail_to_queue(req, err);
>          }
>
> -        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
> -                notify = true;
> +        if (bd->last)
> +                virtqueue_kick(vblk->vqs[qid].vq);
>          spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>
> -        if (notify)
> -                virtqueue_notify(vblk->vqs[qid].vq);
>          return BLK_STS_OK;
>  }
>
> @@ -476,7 +469,6 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
>  {
>          struct request *req;
>          unsigned long flags;
> -        bool kick;
>
>          spin_lock_irqsave(&vq->lock, flags);
>
> @@ -492,11 +484,8 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
>                  }
>          }
>
> -        kick = virtqueue_kick_prepare(vq->vq);
> +        virtqueue_kick(vq->vq);
>          spin_unlock_irqrestore(&vq->lock, flags);
> -
> -        if (kick)
> -                virtqueue_notify(vq->vq);
>  }
>
>  static void virtio_queue_rqs(struct rq_list *rqlist)
> --
> 2.17.1
>
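For context, the ordering problem described in the commit message comes down to where virtqueue_notify() runs relative to the vq lock. Below is a minimal sketch of the two patterns being compared; the helper names notify_outside_lock() and notify_under_lock() are illustrative only and do not exist in the driver.

/*
 * Old pattern: the notification is posted after the lock is dropped, so
 * two CPUs can deliver their notifications out of order. With
 * VIRTIO_F_NOTIFICATION_DATA each notification carries the producer index
 * captured by virtqueue_kick_prepare(), so a stale index can overtake a
 * newer one at the device.
 */
static void notify_outside_lock(struct virtio_blk_vq *vq)
{
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq); /* snapshots the producer index */
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);      /* may race with a newer notify */
}

/*
 * New pattern: virtqueue_kick() combines prepare and notify, and the vq
 * lock serializes the whole sequence, so notifications are posted in
 * producer-index order.
 */
static void notify_under_lock(struct virtio_blk_vq *vq)
{
        spin_lock_irq(&vq->lock);
        virtqueue_kick(vq->vq);
        spin_unlock_irq(&vq->lock);
}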
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3efe378f1386..14d9e66bb844 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -379,14 +379,10 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
 {
         struct virtio_blk *vblk = hctx->queue->queuedata;
         struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
-        bool kick;
 
         spin_lock_irq(&vq->lock);
-        kick = virtqueue_kick_prepare(vq->vq);
+        virtqueue_kick(vq->vq);
         spin_unlock_irq(&vq->lock);
-
-        if (kick)
-                virtqueue_notify(vq->vq);
 }
 
 static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
@@ -432,7 +428,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
         struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
         unsigned long flags;
         int qid = hctx->queue_num;
-        bool notify = false;
         blk_status_t status;
         int err;
 
@@ -454,12 +449,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                 return virtblk_fail_to_queue(req, err);
         }
 
-        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
-                notify = true;
+        if (bd->last)
+                virtqueue_kick(vblk->vqs[qid].vq);
         spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 
-        if (notify)
-                virtqueue_notify(vblk->vqs[qid].vq);
         return BLK_STS_OK;
 }
 
@@ -476,7 +469,6 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
 {
         struct request *req;
         unsigned long flags;
-        bool kick;
 
         spin_lock_irqsave(&vq->lock, flags);
 
@@ -492,11 +484,8 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
                 }
         }
 
-        kick = virtqueue_kick_prepare(vq->vq);
+        virtqueue_kick(vq->vq);
         spin_unlock_irqrestore(&vq->lock, flags);
-
-        if (kick)
-                virtqueue_notify(vq->vq);
 }
 
 static void virtio_queue_rqs(struct rq_list *rqlist)
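One conceivable way to address the performance question raised in the review, sketched here purely as an illustration and not as part of the submitted patch, would be to keep the old unlocked notify path when VIRTIO_F_NOTIFICATION_DATA was not negotiated. The helper virtblk_kick() below is hypothetical; it only relies on virtio_has_feature() and the vblk->vdev pointer that the driver already has.

/*
 * Hypothetical helper, not in the patch: fold the notify under the lock
 * only when VIRTIO_F_NOTIFICATION_DATA is negotiated and ordering matters;
 * otherwise keep the potentially expensive notify (e.g. a vmexit in a
 * virtualization setup) outside the lock, as before.
 */
static void virtblk_kick(struct virtio_blk *vblk, struct virtio_blk_vq *vq)
{
        if (virtio_has_feature(vblk->vdev, VIRTIO_F_NOTIFICATION_DATA)) {
                spin_lock_irq(&vq->lock);
                virtqueue_kick(vq->vq);        /* notify while holding the lock */
                spin_unlock_irq(&vq->lock);
        } else {
                bool kick;

                spin_lock_irq(&vq->lock);
                kick = virtqueue_kick_prepare(vq->vq);
                spin_unlock_irq(&vq->lock);

                if (kick)
                        virtqueue_notify(vq->vq); /* lock already dropped */
        }
}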