
[vhost,16/23] vdpa/mlx5: Add error code for suspend/resume VQ

Message ID 20240617-stage-vdpa-vq-precreate-v1-16-8c0483f0ca2a@nvidia.com (mailing list archive)
State Superseded
Series vdpa/mlx5: Pre-create HW VQs to reduce LM downtime

Checks

Context                  Check    Description
netdev/tree_selection    success  Not a local patch

Commit Message

Dragos Tatulea June 17, 2024, 3:07 p.m. UTC
Instead of blindly calling suspend/resume_vqs(), make them return error
codes.

To keep compatibility, continue suspending or resuming the remaining VQs
when one fails and return the last error code. The assumption here is
that all failing VQs would return the same error code.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 77 +++++++++++++++++++++++++++------------
 1 file changed, 54 insertions(+), 23 deletions(-)
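
The error-aggregation behaviour described in the commit message (keep going
on failure, report the last non-zero code) can be seen in isolation with a
minimal user-space sketch. The fake_vq / fake_suspend_vq names below are
stand-ins, not part of the driver; the odd-index failure is hard-coded just
to produce an error:

#include <stdio.h>
#include <errno.h>

struct fake_vq {
	int index;
	int initialized;
};

/* Stand-in for suspend_vq(): pretend odd-indexed queues fail. */
static int fake_suspend_vq(struct fake_vq *vq)
{
	if (!vq->initialized)
		return 0;	/* nothing to do, not an error */

	return (vq->index & 1) ? -EINVAL : 0;
}

static int fake_suspend_vqs(struct fake_vq *vqs, int num)
{
	int err = 0;

	for (int i = 0; i < num; i++) {
		int local_err = fake_suspend_vq(&vqs[i]);

		/* Remember the last failure, but do not stop early. */
		err = local_err ? local_err : err;
	}

	return err;
}

int main(void)
{
	struct fake_vq vqs[] = {
		{ .index = 0, .initialized = 1 },
		{ .index = 1, .initialized = 1 },
		{ .index = 2, .initialized = 1 },
	};

	/* Prints -22 (-EINVAL): vq 1 fails but vq 2 is still processed. */
	printf("suspend_vqs returned %d\n", fake_suspend_vqs(vqs, 3));

	return 0;
}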

Comments

Eugenio Perez Martin June 19, 2024, 3:41 p.m. UTC | #1
On Mon, Jun 17, 2024 at 5:09 PM Dragos Tatulea <dtatulea@nvidia.com> wrote:
>
> Instead of blindly calling suspend/resume_vqs(), make them return error
> codes.
>
> To keep compatibility, continue suspending or resuming the remaining VQs
> when one fails and return the last error code. The assumption here is
> that all failing VQs would return the same error code.
>
> Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
> Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>

Acked-by: Eugenio Pérez <eperezma@redhat.com>


Patch

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e4d68d2d0bb4..e3a82c43b44e 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1526,71 +1526,102 @@  static int setup_vq(struct mlx5_vdpa_net *ndev,
 	return err;
 }
 
-static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
 	struct mlx5_virtq_attr attr;
+	int err;
 
 	if (!mvq->initialized)
-		return;
+		return 0;
 
 	if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
-		return;
+		return 0;
 
-	if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
-		mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
+	err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
+	if (err) {
+		mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed, err: %d\n", err);
+		return err;
+	}
 
-	if (query_virtqueue(ndev, mvq, &attr)) {
-		mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
-		return;
+	err = query_virtqueue(ndev, mvq, &attr);
+	if (err) {
+		mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue, err: %d\n", err);
+		return err;
 	}
+
 	mvq->avail_idx = attr.available_index;
 	mvq->used_idx = attr.used_index;
+
+	return 0;
 }
 
-static void suspend_vqs(struct mlx5_vdpa_net *ndev)
+static int suspend_vqs(struct mlx5_vdpa_net *ndev)
 {
+	int err = 0;
 	int i;
 
-	for (i = 0; i < ndev->cur_num_vqs; i++)
-		suspend_vq(ndev, &ndev->vqs[i]);
+	for (i = 0; i < ndev->cur_num_vqs; i++) {
+		int local_err = suspend_vq(ndev, &ndev->vqs[i]);
+
+		err = local_err ? local_err : err;
+	}
+
+	return err;
 }
 
-static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
+	int err;
+
 	if (!mvq->initialized)
-		return;
+		return 0;
 
 	switch (mvq->fw_state) {
 	case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
 		/* Due to a FW quirk we need to modify the VQ fields first then change state.
 		 * This should be fixed soon. After that, a single command can be used.
 		 */
-		if (modify_virtqueue(ndev, mvq, 0))
+		err = modify_virtqueue(ndev, mvq, 0);
+		if (err) {
 			mlx5_vdpa_warn(&ndev->mvdev,
-				"modify vq properties failed for vq %u\n", mvq->index);
+				"modify vq properties failed for vq %u, err: %d\n",
+				mvq->index, err);
+			return err;
+		}
 		break;
 	case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
 		if (!is_resumable(ndev)) {
 			mlx5_vdpa_warn(&ndev->mvdev, "vq %d is not resumable\n", mvq->index);
-			return;
+			return -EINVAL;
 		}
 		break;
 	case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
-		return;
+		return 0;
 	default:
 		mlx5_vdpa_warn(&ndev->mvdev, "resume vq %u called from bad state %d\n",
 			       mvq->index, mvq->fw_state);
-		return;
+		return -EINVAL;
 	}
 
-	if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY))
-		mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index);
+	err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+	if (err)
+		mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u, err: %d\n",
+			       mvq->index, err);
+
+	return err;
 }
 
-static void resume_vqs(struct mlx5_vdpa_net *ndev)
+static int resume_vqs(struct mlx5_vdpa_net *ndev)
 {
-	for (int i = 0; i < ndev->cur_num_vqs; i++)
-		resume_vq(ndev, &ndev->vqs[i]);
+	int err = 0;
+
+	for (int i = 0; i < ndev->cur_num_vqs; i++) {
+		int local_err = resume_vq(ndev, &ndev->vqs[i]);
+
+		err = local_err ? local_err : err;
+	}
+
+	return err;
 }
 
 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)