
[vhost,7/7] vdpa/mlx5: Postpone MR deletion

Message ID 20240821114100.2261167-9-dtatulea@nvidia.com (mailing list archive)
State New, archived
Series vdpa/mlx5: Optimize MKEY operations

Commit Message

Dragos Tatulea Aug. 21, 2024, 11:41 a.m. UTC
Currently, when a new MR is set up, the old MR is deleted. MR deletion
takes about 30-40% of the time of MR creation. As deleting the old MR
is not important for the process of setting up the new MR, this
operation can be postponed.

This patch adds a workqueue that does MR garbage collection at a later
point. If the MR lock is taken, the handler will back off and
reschedule. The exception is during shutdown: then the handler must
not postpone the work.

Note that this is only a speculative optimization: if a mapping
operation is triggered while the garbage collection handler holds the
lock, that operation will have to wait for the handler to finish.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
---
 drivers/vdpa/mlx5/core/mlx5_vdpa.h | 10 ++++++
 drivers/vdpa/mlx5/core/mr.c        | 51 ++++++++++++++++++++++++++++--
 drivers/vdpa/mlx5/net/mlx5_vnet.c  |  3 +-
 3 files changed, 60 insertions(+), 4 deletions(-)

Comments

Eugenio Perez Martin Aug. 29, 2024, 3:07 p.m. UTC | #1
On Wed, Aug 21, 2024 at 1:42 PM Dragos Tatulea <dtatulea@nvidia.com> wrote:
>
> Currently, when a new MR is set up, the old MR is deleted. MR deletion
> takes about 30-40% of the time of MR creation. As deleting the old MR
> is not important for the process of setting up the new MR, this
> operation can be postponed.
>
> This patch adds a workqueue that does MR garbage collection at a later
> point. If the MR lock is taken, the handler will back off and
> reschedule. The exception is during shutdown: then the handler must
> not postpone the work.
>
> Note that this is only a speculative optimization: if a mapping
> operation is triggered while the garbage collection handler holds the
> lock, that operation will have to wait for the handler to finish.
>
> Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
> Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
> ---
>  drivers/vdpa/mlx5/core/mlx5_vdpa.h | 10 ++++++
>  drivers/vdpa/mlx5/core/mr.c        | 51 ++++++++++++++++++++++++++++--
>  drivers/vdpa/mlx5/net/mlx5_vnet.c  |  3 +-
>  3 files changed, 60 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> index c3e17bc888e8..2cedf7e2dbc4 100644
> --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> @@ -86,8 +86,18 @@ enum {
>  struct mlx5_vdpa_mr_resources {
>         struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
>         unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
> +
> +       /* Pre-deletion mr list */
>         struct list_head mr_list_head;
> +
> +       /* Deferred mr list */
> +       struct list_head mr_gc_list_head;
> +       struct workqueue_struct *wq_gc;
> +       struct delayed_work gc_dwork_ent;
> +
>         struct mutex lock;
> +
> +       atomic_t shutdown;
>  };
>
>  struct mlx5_vdpa_dev {
> diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
> index ec75f165f832..43fce6b39cf2 100644
> --- a/drivers/vdpa/mlx5/core/mr.c
> +++ b/drivers/vdpa/mlx5/core/mr.c
> @@ -653,14 +653,46 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
>         kfree(mr);
>  }
>
> +#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
> +
> +static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
> +{
> +       struct mlx5_vdpa_mr_resources *mres;
> +       struct mlx5_vdpa_mr *mr, *tmp;
> +       struct mlx5_vdpa_dev *mvdev;
> +
> +       mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
> +
> +       if (atomic_read(&mres->shutdown)) {
> +               mutex_lock(&mres->lock);
> +       } else if (!mutex_trylock(&mres->lock)) {

Is the trylock worth it? My understanding is that mutex_lock will add
the kthread to the waitqueue anyway if it is not able to acquire the
lock.
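
For reference, a minimal sketch of the plain-lock alternative implied
here, reusing the identifiers from the patch (whether it behaves
acceptably under .set_map() bursts is exactly the open question):

static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
{
	struct mlx5_vdpa_mr_resources *mres;
	struct mlx5_vdpa_mr *mr, *tmp;
	struct mlx5_vdpa_dev *mvdev;

	mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
	mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);

	/* Sleep in the mutex wait queue instead of re-arming: simpler,
	 * but the worker then competes for the lock as soon as the
	 * current holder releases it, potentially delaying the next
	 * .set_map() in a burst.
	 */
	mutex_lock(&mres->lock);
	list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list)
		_mlx5_vdpa_destroy_mr(mvdev, mr);
	mutex_unlock(&mres->lock);
}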

> +               queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
> +                                  msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
> +               return;
> +       }
> +
> +       mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);
> +
> +       list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
> +               _mlx5_vdpa_destroy_mr(mvdev, mr);
> +       }
> +
> +       mutex_unlock(&mres->lock);
> +}
> +
>  static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
>                               struct mlx5_vdpa_mr *mr)
>  {
> +       struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
> +
>         if (!mr)
>                 return;
>
> -       if (refcount_dec_and_test(&mr->refcount))
> -               _mlx5_vdpa_destroy_mr(mvdev, mr);
> +       if (refcount_dec_and_test(&mr->refcount)) {
> +               list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
> +               queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
> +                                  msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));

Why the delay?

> +       }
>  }
>
>  void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
> @@ -848,9 +880,17 @@ int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
>  {
>         struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
>
> -       INIT_LIST_HEAD(&mres->mr_list_head);
> +       mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
> +       if (!mres->wq_gc)
> +               return -ENOMEM;
> +
> +       INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);
> +
>         mutex_init(&mres->lock);
>
> +       INIT_LIST_HEAD(&mres->mr_list_head);
> +       INIT_LIST_HEAD(&mres->mr_gc_list_head);
> +
>         return 0;
>  }
>
> @@ -858,5 +898,10 @@ void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
>  {
>         struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
>
> +       atomic_set(&mres->shutdown, 1);
> +
> +       flush_delayed_work(&mres->gc_dwork_ent);
> +       destroy_workqueue(mres->wq_gc);
> +       mres->wq_gc = NULL;
>         mutex_destroy(&mres->lock);
>  }
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 1cadcb05a5c7..ee9482ef51e6 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -3435,6 +3435,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
>         free_fixed_resources(ndev);
>         mlx5_vdpa_clean_mrs(mvdev);
>         mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
> +       mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
> +
>         if (!is_zero_ether_addr(ndev->config.mac)) {
>                 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
>                 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
> @@ -4044,7 +4046,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
>         destroy_workqueue(wq);
>         mgtdev->ndev = NULL;
>

Extra newline here.

> -       mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
>  }
>
>  static const struct vdpa_mgmtdev_ops mdev_ops = {
> --
> 2.45.1
>
Dragos Tatulea Aug. 29, 2024, 3:22 p.m. UTC | #2
On 29.08.24 17:07, Eugenio Perez Martin wrote:
> On Wed, Aug 21, 2024 at 1:42 PM Dragos Tatulea <dtatulea@nvidia.com> wrote:
>>
>> Currently, when a new MR is set up, the old MR is deleted. MR deletion
>> takes about 30-40% of the time of MR creation. As deleting the old MR
>> is not important for the process of setting up the new MR, this
>> operation can be postponed.
>>
>> This patch adds a workqueue that does MR garbage collection at a later
>> point. If the MR lock is taken, the handler will back off and
>> reschedule. The exception is during shutdown: then the handler must
>> not postpone the work.
>>
>> Note that this is only a speculative optimization: if a mapping
>> operation is triggered while the garbage collection handler holds the
>> lock, that operation will have to wait for the handler to finish.
>>
>> Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
>> Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
>> ---
>>  drivers/vdpa/mlx5/core/mlx5_vdpa.h | 10 ++++++
>>  drivers/vdpa/mlx5/core/mr.c        | 51 ++++++++++++++++++++++++++++--
>>  drivers/vdpa/mlx5/net/mlx5_vnet.c  |  3 +-
>>  3 files changed, 60 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
>> index c3e17bc888e8..2cedf7e2dbc4 100644
>> --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
>> +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
>> @@ -86,8 +86,18 @@ enum {
>>  struct mlx5_vdpa_mr_resources {
>>         struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
>>         unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
>> +
>> +       /* Pre-deletion mr list */
>>         struct list_head mr_list_head;
>> +
>> +       /* Deferred mr list */
>> +       struct list_head mr_gc_list_head;
>> +       struct workqueue_struct *wq_gc;
>> +       struct delayed_work gc_dwork_ent;
>> +
>>         struct mutex lock;
>> +
>> +       atomic_t shutdown;
>>  };
>>
>>  struct mlx5_vdpa_dev {
>> diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
>> index ec75f165f832..43fce6b39cf2 100644
>> --- a/drivers/vdpa/mlx5/core/mr.c
>> +++ b/drivers/vdpa/mlx5/core/mr.c
>> @@ -653,14 +653,46 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
>>         kfree(mr);
>>  }
>>
>> +#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
>> +
>> +static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
>> +{
>> +       struct mlx5_vdpa_mr_resources *mres;
>> +       struct mlx5_vdpa_mr *mr, *tmp;
>> +       struct mlx5_vdpa_dev *mvdev;
>> +
>> +       mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
>> +
>> +       if (atomic_read(&mres->shutdown)) {
>> +               mutex_lock(&mres->lock);
>> +       } else if (!mutex_trylock(&mres->lock)) {
> 
> Is the trylock worth it? My understanding is that mutex_lock will add
> the kthread to the waitqueue anyway if it is not able to acquire the
> lock.
> 
I want to believe it is :). I noticed during testing that the GC work
can interfere with the case where there are several .set_map()
operations in quick succession. That's why the work is delayed by such
a long time.

It's not a perfect heuristic but I found that it's better than not
having it.
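
In concrete terms, the intended timeline looks roughly like this
(timing values are illustrative; MLX5_VDPA_MR_GC_TRIGGER_MS is 2000 in
this patch):

/*
 * t=0       .set_map() #1: old MR parked on mr_gc_list_head, GC work
 *           armed with a 2000ms delay.
 * t+epsilon .set_map() #2..#N: each takes mres->lock uncontended,
 *           since the GC handler has not fired yet.
 * t+2000ms  GC handler fires. If a mapping operation happens to hold
 *           the lock, mutex_trylock() fails and the handler re-arms
 *           for another 2000ms; otherwise it drains mr_gc_list_head
 *           in one pass.
 */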

>> +               queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
>> +                                  msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
>> +               return;
>> +       }
>> +
>> +       mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);
>> +
>> +       list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
>> +               _mlx5_vdpa_destroy_mr(mvdev, mr);
>> +       }
>> +
>> +       mutex_unlock(&mres->lock);
>> +}
>> +
>>  static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
>>                               struct mlx5_vdpa_mr *mr)
>>  {
>> +       struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
>> +
>>         if (!mr)
>>                 return;
>>
>> -       if (refcount_dec_and_test(&mr->refcount))
>> -               _mlx5_vdpa_destroy_mr(mvdev, mr);
>> +       if (refcount_dec_and_test(&mr->refcount)) {
>> +               list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
>> +               queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
>> +                                  msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
> 
> Why the delay?
> 
See above.

>> +       }
>>  }
>>
>>  void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
>> @@ -848,9 +880,17 @@ int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
>>  {
>>         struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
>>
>> -       INIT_LIST_HEAD(&mres->mr_list_head);
>> +       mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
>> +       if (!mres->wq_gc)
>> +               return -ENOMEM;
>> +
>> +       INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);
>> +
>>         mutex_init(&mres->lock);
>>
>> +       INIT_LIST_HEAD(&mres->mr_list_head);
>> +       INIT_LIST_HEAD(&mres->mr_gc_list_head);
>> +
>>         return 0;
>>  }
>>
>> @@ -858,5 +898,10 @@ void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
>>  {
>>         struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
>>
>> +       atomic_set(&mres->shutdown, 1);
>> +
>> +       flush_delayed_work(&mres->gc_dwork_ent);
>> +       destroy_workqueue(mres->wq_gc);
>> +       mres->wq_gc = NULL;
>>         mutex_destroy(&mres->lock);
>>  }
>> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> index 1cadcb05a5c7..ee9482ef51e6 100644
>> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> @@ -3435,6 +3435,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
>>         free_fixed_resources(ndev);
>>         mlx5_vdpa_clean_mrs(mvdev);
>>         mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
>> +       mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
>> +
>>         if (!is_zero_ether_addr(ndev->config.mac)) {
>>                 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
>>                 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
>> @@ -4044,7 +4046,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
>>         destroy_workqueue(wq);
>>         mgtdev->ndev = NULL;
>>
> 
> Extra newline here.
Ack.
> 
>> -       mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
>>  }
>>
>>  static const struct vdpa_mgmtdev_ops mdev_ops = {
>> --
>> 2.45.1
>>
>
Eugenio Perez Martin Aug. 29, 2024, 5:12 p.m. UTC | #3
On Thu, Aug 29, 2024 at 5:23 PM Dragos Tatulea <dtatulea@nvidia.com> wrote:
>
>
>
> On 29.08.24 17:07, Eugenio Perez Martin wrote:
> > On Wed, Aug 21, 2024 at 1:42 PM Dragos Tatulea <dtatulea@nvidia.com> wrote:
> >>
> >> Currently, when a new MR is set up, the old MR is deleted. MR deletion
> >> takes about 30-40% of the time of MR creation. As deleting the old MR
> >> is not important for the process of setting up the new MR, this
> >> operation can be postponed.
> >>
> >> This patch adds a workqueue that does MR garbage collection at a later
> >> point. If the MR lock is taken, the handler will back off and
> >> reschedule. The exception is during shutdown: then the handler must
> >> not postpone the work.
> >>
> >> Note that this is only a speculative optimization: if a mapping
> >> operation is triggered while the garbage collection handler holds the
> >> lock, that operation will have to wait for the handler to finish.
> >>
> >> Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
> >> Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
> >> ---
> >>  drivers/vdpa/mlx5/core/mlx5_vdpa.h | 10 ++++++
> >>  drivers/vdpa/mlx5/core/mr.c        | 51 ++++++++++++++++++++++++++++--
> >>  drivers/vdpa/mlx5/net/mlx5_vnet.c  |  3 +-
> >>  3 files changed, 60 insertions(+), 4 deletions(-)
> >>
> >> diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> >> index c3e17bc888e8..2cedf7e2dbc4 100644
> >> --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> >> +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> >> @@ -86,8 +86,18 @@ enum {
> >>  struct mlx5_vdpa_mr_resources {
> >>         struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
> >>         unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
> >> +
> >> +       /* Pre-deletion mr list */
> >>         struct list_head mr_list_head;
> >> +
> >> +       /* Deferred mr list */
> >> +       struct list_head mr_gc_list_head;
> >> +       struct workqueue_struct *wq_gc;
> >> +       struct delayed_work gc_dwork_ent;
> >> +
> >>         struct mutex lock;
> >> +
> >> +       atomic_t shutdown;
> >>  };
> >>
> >>  struct mlx5_vdpa_dev {
> >> diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
> >> index ec75f165f832..43fce6b39cf2 100644
> >> --- a/drivers/vdpa/mlx5/core/mr.c
> >> +++ b/drivers/vdpa/mlx5/core/mr.c
> >> @@ -653,14 +653,46 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
> >>         kfree(mr);
> >>  }
> >>
> >> +#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
> >> +
> >> +static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
> >> +{
> >> +       struct mlx5_vdpa_mr_resources *mres;
> >> +       struct mlx5_vdpa_mr *mr, *tmp;
> >> +       struct mlx5_vdpa_dev *mvdev;
> >> +
> >> +       mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
> >> +
> >> +       if (atomic_read(&mres->shutdown)) {
> >> +               mutex_lock(&mres->lock);
> >> +       } else if (!mutex_trylock(&mres->lock)) {
> >
> > Is the trylock worth it? My understanding is that mutex_lock will add
> > the kthread to the waitqueue anyway if it is not able to acquire the
> > lock.
> >
> I want to believe it is :). I noticed during testing that the GC work
> can interfere with the case where there are several .set_map()
> operations in quick succession. That's why the work is delayed by such
> a long time.
>
> It's not a perfect heuristic but I found that it's better than not
> having it.
>

Understood, thanks for explaining! Can you add the explanation to the macro?

It would be great to find a mechanism so the work is added in a
low-priority fashion, but I don't know of any.
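
Something along these lines would already help (only a sketch of the
wording, to be adapted):

/*
 * MR deletion is deferred by a generous interval: back-to-back
 * .set_map() calls all contend on mres->lock, and running the GC
 * between them would make each call wait on the destruction of stale
 * MRs. A long delay lets such a burst drain first; if the lock is
 * still held when the handler fires, it backs off and re-arms.
 */
#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000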

> >> +               queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
> >> +                                  msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
> >> +               return;
> >> +       }
> >> +
> >> +       mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);
> >> +
> >> +       list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
> >> +               _mlx5_vdpa_destroy_mr(mvdev, mr);
> >> +       }
> >> +
> >> +       mutex_unlock(&mres->lock);
> >> +}
> >> +
> >>  static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
> >>                               struct mlx5_vdpa_mr *mr)
> >>  {
> >> +       struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
> >> +
> >>         if (!mr)
> >>                 return;
> >>
> >> -       if (refcount_dec_and_test(&mr->refcount))
> >> -               _mlx5_vdpa_destroy_mr(mvdev, mr);
> >> +       if (refcount_dec_and_test(&mr->refcount)) {
> >> +               list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
> >> +               queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
> >> +                                  msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
> >
> > Why the delay?
> >
> See above.
>
> >> +       }
> >>  }
> >>
> >>  void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
> >> @@ -848,9 +880,17 @@ int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
> >>  {
> >>         struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
> >>
> >> -       INIT_LIST_HEAD(&mres->mr_list_head);
> >> +       mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
> >> +       if (!mres->wq_gc)
> >> +               return -ENOMEM;
> >> +
> >> +       INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);
> >> +
> >>         mutex_init(&mres->lock);
> >>
> >> +       INIT_LIST_HEAD(&mres->mr_list_head);
> >> +       INIT_LIST_HEAD(&mres->mr_gc_list_head);
> >> +
> >>         return 0;
> >>  }
> >>
> >> @@ -858,5 +898,10 @@ void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
> >>  {
> >>         struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
> >>
> >> +       atomic_set(&mres->shutdown, 1);
> >> +
> >> +       flush_delayed_work(&mres->gc_dwork_ent);
> >> +       destroy_workqueue(mres->wq_gc);
> >> +       mres->wq_gc = NULL;
> >>         mutex_destroy(&mres->lock);
> >>  }
> >> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> >> index 1cadcb05a5c7..ee9482ef51e6 100644
> >> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> >> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> >> @@ -3435,6 +3435,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
> >>         free_fixed_resources(ndev);
> >>         mlx5_vdpa_clean_mrs(mvdev);
> >>         mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
> >> +       mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
> >> +
> >>         if (!is_zero_ether_addr(ndev->config.mac)) {
> >>                 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
> >>                 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
> >> @@ -4044,7 +4046,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
> >>         destroy_workqueue(wq);
> >>         mgtdev->ndev = NULL;
> >>
> >
> > Extra newline here.
> Ack.
> >
> >> -       mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
> >>  }
> >>
> >>  static const struct vdpa_mgmtdev_ops mdev_ops = {
> >> --
> >> 2.45.1
> >>
> >
>

Patch

diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index c3e17bc888e8..2cedf7e2dbc4 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -86,8 +86,18 @@  enum {
 struct mlx5_vdpa_mr_resources {
 	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
 	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+
+	/* Pre-deletion mr list */
 	struct list_head mr_list_head;
+
+	/* Deferred mr list */
+	struct list_head mr_gc_list_head;
+	struct workqueue_struct *wq_gc;
+	struct delayed_work gc_dwork_ent;
+
 	struct mutex lock;
+
+	atomic_t shutdown;
 };
 
 struct mlx5_vdpa_dev {
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index ec75f165f832..43fce6b39cf2 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -653,14 +653,46 @@  static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
 	kfree(mr);
 }
 
+#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
+
+static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
+{
+	struct mlx5_vdpa_mr_resources *mres;
+	struct mlx5_vdpa_mr *mr, *tmp;
+	struct mlx5_vdpa_dev *mvdev;
+
+	mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
+
+	if (atomic_read(&mres->shutdown)) {
+		mutex_lock(&mres->lock);
+	} else if (!mutex_trylock(&mres->lock)) {
+		queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+				   msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+		return;
+	}
+
+	mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);
+
+	list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
+		_mlx5_vdpa_destroy_mr(mvdev, mr);
+	}
+
+	mutex_unlock(&mres->lock);
+}
+
 static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
 			      struct mlx5_vdpa_mr *mr)
 {
+	struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
 	if (!mr)
 		return;
 
-	if (refcount_dec_and_test(&mr->refcount))
-		_mlx5_vdpa_destroy_mr(mvdev, mr);
+	if (refcount_dec_and_test(&mr->refcount)) {
+		list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
+		queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+				   msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+	}
 }
 
 void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
@@ -848,9 +880,17 @@  int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
 	struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
 
-	INIT_LIST_HEAD(&mres->mr_list_head);
+	mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
+	if (!mres->wq_gc)
+		return -ENOMEM;
+
+	INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);
+
 	mutex_init(&mres->lock);
 
+	INIT_LIST_HEAD(&mres->mr_list_head);
+	INIT_LIST_HEAD(&mres->mr_gc_list_head);
+
 	return 0;
 }
 
@@ -858,5 +898,10 @@  void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
 	struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
 
+	atomic_set(&mres->shutdown, 1);
+
+	flush_delayed_work(&mres->gc_dwork_ent);
+	destroy_workqueue(mres->wq_gc);
+	mres->wq_gc = NULL;
 	mutex_destroy(&mres->lock);
 }
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 1cadcb05a5c7..ee9482ef51e6 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3435,6 +3435,8 @@  static void mlx5_vdpa_free(struct vdpa_device *vdev)
 	free_fixed_resources(ndev);
 	mlx5_vdpa_clean_mrs(mvdev);
 	mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+	mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
+
 	if (!is_zero_ether_addr(ndev->config.mac)) {
 		pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
 		mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -4044,7 +4046,6 @@  static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
 	destroy_workqueue(wq);
 	mgtdev->ndev = NULL;
 
-	mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
 }
 
 static const struct vdpa_mgmtdev_ops mdev_ops = {