Message ID | 20240821114100.2261167-6-dtatulea@nvidia.com (mailing list archive)
---|---
State | New, archived
Series | vdpa/mlx5: Optimze MKEY operations
On Wed, Aug 21, 2024 at 1:42 PM Dragos Tatulea <dtatulea@nvidia.com> wrote:
>
> Group all mapping related resources into their own structure.
>
> Upcoming patches will add more members in this new structure.
>
> Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
> Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>

Acked-by: Eugenio Pérez <eperezma@redhat.com>

> ---
>  drivers/vdpa/mlx5/core/mlx5_vdpa.h | 13 ++++++-----
>  drivers/vdpa/mlx5/core/mr.c        | 30 ++++++++++++-------------
>  drivers/vdpa/mlx5/core/resources.c |  6 ++---
>  drivers/vdpa/mlx5/net/mlx5_vnet.c  | 36 +++++++++++++++---------------
>  4 files changed, 44 insertions(+), 41 deletions(-)
>
> [...]
> --
> 2.45.1
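The change in the diff below is mechanical, but the shape is worth noting: the four MR-related members and the mutex that serializes them move behind a single handle, so later patches can grow the group without widening `struct mlx5_vdpa_dev` again. Here is a minimal userspace sketch of the same pattern; it uses a pthread mutex in place of the kernel's `struct mutex`, and all names (`mr_resources`, `vdpa_dev`, `get_group_mr`) are illustrative stand-ins, not the driver's:

```c
/* Illustrative sketch only: pthread mutex instead of the kernel's
 * struct mutex; simplified stand-in types, not the driver's. */
#include <pthread.h>

#define NUM_AS        2   /* stands in for MLX5_VDPA_NUM_AS */
#define NUMVQ_GROUPS  3   /* stands in for MLX5_VDPA_NUMVQ_GROUPS */

struct mr;                /* opaque stand-in for struct mlx5_vdpa_mr */

/* Mirrors the shape of struct mlx5_vdpa_mr_resources (the
 * mr_list_head used for leak tracking is omitted here). */
struct mr_resources {
        struct mr *mr[NUM_AS];
        unsigned int group2asid[NUMVQ_GROUPS];
        pthread_mutex_t mr_mtx;
};

struct vdpa_dev {
        struct mr_resources mres;  /* was: four loose members on the device */
        /* ... unrelated device state ... */
};

/* Call sites change mechanically: every access gains an "mres." hop and
 * locking goes through the substructure's mutex. The mutex must be set
 * up first, mirroring mutex_init() in mlx5_vdpa_alloc_resources(). */
static struct mr *get_group_mr(struct vdpa_dev *dev, unsigned int group)
{
        struct mr *m;

        pthread_mutex_lock(&dev->mres.mr_mtx);
        m = dev->mres.mr[dev->mres.group2asid[group]];
        pthread_mutex_unlock(&dev->mres.mr_mtx);
        return m;
}
```

Keeping the lock inside the structure it guards also makes the locking scope self-documenting. The full diff follows.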
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 4d217d18239c..5ae6deea2a8a 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -83,10 +83,18 @@ enum {
 	MLX5_VDPA_NUM_AS = 2
 };
 
+struct mlx5_vdpa_mr_resources {
+	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+	struct list_head mr_list_head;
+	struct mutex mr_mtx;
+};
+
 struct mlx5_vdpa_dev {
 	struct vdpa_device vdev;
 	struct mlx5_core_dev *mdev;
 	struct mlx5_vdpa_resources res;
+	struct mlx5_vdpa_mr_resources mres;
 
 	u64 mlx_features;
 	u64 actual_features;
@@ -95,13 +103,8 @@ struct mlx5_vdpa_dev {
 	u16 max_idx;
 	u32 generation;
 
-	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
-	struct list_head mr_list_head;
-	/* serialize mr access */
-	struct mutex mr_mtx;
 	struct mlx5_control_vq cvq;
 	struct workqueue_struct *wq;
-	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
 	bool suspended;
 
 	struct mlx5_async_ctx async_ctx;
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 149edea09c8f..2c8660e5c0de 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -666,9 +666,9 @@ static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
 void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
 		      struct mlx5_vdpa_mr *mr)
 {
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 	_mlx5_vdpa_put_mr(mvdev, mr);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 }
 
 static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
@@ -683,39 +683,39 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
 void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
 		      struct mlx5_vdpa_mr *mr)
 {
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 	_mlx5_vdpa_get_mr(mvdev, mr);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 }
 
 void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
 			 struct mlx5_vdpa_mr *new_mr,
 			 unsigned int asid)
 {
-	struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];
+	struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid];
 
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 
 	_mlx5_vdpa_put_mr(mvdev, old_mr);
-	mvdev->mr[asid] = new_mr;
+	mvdev->mres.mr[asid] = new_mr;
 
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 }
 
 static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
 {
 	struct mlx5_vdpa_mr *mr;
 
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 
-	list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
+	list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) {
 
 		mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
 				      "mr: %p, mkey: 0x%x, refcount: %u\n",
 				      mr, mr->mkey, refcount_read(&mr->refcount));
 	}
 
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 
 }
 
@@ -753,7 +753,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 	if (err)
 		goto err_iotlb;
 
-	list_add_tail(&mr->mr_list, &mvdev->mr_list_head);
+	list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head);
 
 	return 0;
 
@@ -779,9 +779,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 	err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 
 	if (err)
 		goto out_err;
@@ -801,7 +801,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
 {
 	int err;
 
-	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+	if (mvdev->mres.group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
 		return 0;
 
 	spin_lock(&mvdev->cvq.iommu_lock);
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 22ea32fe007b..3e3b3049cb08 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -256,7 +256,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
 		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
 		return -EINVAL;
 	}
-	mutex_init(&mvdev->mr_mtx);
+	mutex_init(&mvdev->mres.mr_mtx);
 	res->uar = mlx5_get_uars_page(mdev);
 	if (IS_ERR(res->uar)) {
 		err = PTR_ERR(res->uar);
@@ -301,7 +301,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
 err_uctx:
 	mlx5_put_uars_page(mdev, res->uar);
 err_uars:
-	mutex_destroy(&mvdev->mr_mtx);
+	mutex_destroy(&mvdev->mres.mr_mtx);
 	return err;
 }
 
@@ -318,7 +318,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
 	dealloc_pd(mvdev, res->pdn, res->uid);
 	destroy_uctx(mvdev, res->uid);
 	mlx5_put_uars_page(mvdev->mdev, res->uar);
-	mutex_destroy(&mvdev->mr_mtx);
+	mutex_destroy(&mvdev->mres.mr_mtx);
 	res->valid = false;
 }
 
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index cf2b77ebc72b..3e55a7f1afcd 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -941,11 +941,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
 	MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
 	MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
 
-	vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+	vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 	if (vq_mr)
 		MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
 
-	vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+	vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 	if (vq_desc_mr &&
 	    MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
 		MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
@@ -953,11 +953,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
 	/* If there is no mr update, make sure that the existing ones are set
 	 * modify to ready.
 	 */
-	vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+	vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 	if (vq_mr)
 		mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
 
-	vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+	vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 	if (vq_desc_mr)
 		mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
 }
@@ -1354,7 +1354,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
-		vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+		vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 
 		if (vq_mr)
 			MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
@@ -1363,7 +1363,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
-		desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+		desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 
 		if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
 			MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
@@ -1381,8 +1381,8 @@ static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
 	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
-		unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP];
-		struct mlx5_vdpa_mr *vq_mr = mvdev->mr[asid];
+		unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP];
+		struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid];
 
 		mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
 		mlx5_vdpa_get_mr(mvdev, vq_mr);
@@ -1390,8 +1390,8 @@ static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
-		unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
-		struct mlx5_vdpa_mr *desc_mr = mvdev->mr[asid];
+		unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
+		struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid];
 
 		mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
 		mlx5_vdpa_get_mr(mvdev, desc_mr);
@@ -3235,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
 
 	/* default mapping all groups are mapped to asid 0 */
 	for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
-		mvdev->group2asid[i] = 0;
+		mvdev->mres.group2asid[i] = 0;
 }
 
 static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
@@ -3353,7 +3353,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
 		new_mr = NULL;
 	}
 
-	if (!mvdev->mr[asid]) {
+	if (!mvdev->mres.mr[asid]) {
 		mlx5_vdpa_update_mr(mvdev, new_mr, asid);
 	} else {
 		err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
@@ -3637,12 +3637,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
 	if (group >= MLX5_VDPA_NUMVQ_GROUPS)
 		return -EINVAL;
 
-	mvdev->group2asid[group] = asid;
+	mvdev->mres.group2asid[group] = asid;
 
-	mutex_lock(&mvdev->mr_mtx);
-	if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
-		err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
+	if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
+		err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 
 	return err;
 }
@@ -3962,7 +3962,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	if (err)
 		goto err_mpfs;
 
-	INIT_LIST_HEAD(&mvdev->mr_list_head);
+	INIT_LIST_HEAD(&mvdev->mres.mr_list_head);
 
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
 		err = mlx5_vdpa_create_dma_mr(mvdev);
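A pattern worth calling out in the mlx5_vnet.c hunks is the two-step lookup `mvdev->mres.mr[mvdev->mres.group2asid[GROUP]]`: a virtqueue group is first translated to its address space ID (ASID), and the ASID then selects the memory region registered for that address space. Below is a minimal runnable sketch of that chain; the enum names and values are illustrative stand-ins mirroring the driver's constants, not the real definitions:

```c
#include <assert.h>
#include <stddef.h>

/* Hypothetical names mirroring the driver's group/ASID constants. */
enum { DATAVQ_GROUP, DATAVQ_DESC_GROUP, CVQ_GROUP, NUMVQ_GROUPS };
enum { NUM_AS = 2 };

struct mr { unsigned int mkey; };

int main(void)
{
        struct mr dma_mr = { .mkey = 0x1234 };
        struct mr *mr[NUM_AS] = { &dma_mr, NULL };
        unsigned int group2asid[NUMVQ_GROUPS] = { 0 };  /* default: every group -> ASID 0 */

        /* vhost may later move the control VQ group to its own address space: */
        group2asid[CVQ_GROUP] = 1;

        /* step 1: group -> ASID; step 2: ASID -> MR for that address space */
        struct mr *vq_mr = mr[group2asid[DATAVQ_GROUP]];
        assert(vq_mr && vq_mr->mkey == 0x1234);

        /* no mapping registered yet for ASID 1, so the CVQ sees no MR */
        struct mr *cvq_mr = mr[group2asid[CVQ_GROUP]];
        assert(cvq_mr == NULL);

        return 0;
}
```

This is why the diff always guards the result (`if (vq_mr)`, `if (desc_mr && ...)`) before programming an mkey: an ASID may have no memory region yet.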