[v5,2/6] vhost: Add the vhost_worker to support kthread

Message ID 20241230124445.1850997-3-lulu@redhat.com (mailing list archive)
State Not Applicable
Series vhost: Add support of kthread API

Checks

Context                  Check     Description
netdev/tree_selection    success   Not a local patch

Commit Message

Cindy Lu Dec. 30, 2024, 12:43 p.m. UTC
Add the previously removed function vhost_worker() back to support
kthread mode, and rename it to vhost_run_work_kthread_list().

The old function vhost_worker() was changed to support tasks in
commit 6e890c5d5021 ("vhost: use vhost_tasks for worker threads")
and to support multiple workers per device using xarray in
commit 1cdaafa1b8b4 ("vhost: replace single worker pointer with xarray").

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 drivers/vhost/vhost.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
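
For readers following the series: this patch only restores the worker loop itself; the code that actually spawns and stops the kthread lives in the other patches of the series. A minimal sketch of how such wiring typically looks (the helper names below are illustrative, not taken from this series):

/*
 * Illustrative sketch only: how a kthread running
 * vhost_run_work_kthread_list() could be created and torn down.
 * The helper names are assumptions, not part of this patch.
 */
static struct task_struct *vhost_kthread_start(struct vhost_worker *worker)
{
	struct task_struct *task;

	task = kthread_create(vhost_run_work_kthread_list, worker,
			      "vhost-%d", task_pid_nr(current));
	if (IS_ERR(task))
		return task;

	/* The worker loop sleeps in schedule() until work is queued. */
	wake_up_process(task);
	return task;
}

static void vhost_kthread_stop(struct task_struct *task)
{
	/* Pairs with the kthread_should_stop() check in the worker loop. */
	kthread_stop(task);
}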

Comments

Jason Wang Jan. 2, 2025, 3:19 a.m. UTC | #1
On Mon, Dec 30, 2024 at 8:45 PM Cindy Lu <lulu@redhat.com> wrote:
>
> Add the previously removed function vhost_worker() back to support
> kthread mode, and rename it to vhost_run_work_kthread_list().
>
> The old function vhost_worker() was changed to support tasks in
> commit 6e890c5d5021 ("vhost: use vhost_tasks for worker threads")
> and to support multiple workers per device using xarray in
> commit 1cdaafa1b8b4 ("vhost: replace single worker pointer with xarray").
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>

I think we need to tweak the title as this patch just brings back the
kthread worker?

Other than that,

Acked-by: Jason Wang <jasowang@redhat.com>

Thanks

> ---
>  drivers/vhost/vhost.c | 38 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 38 insertions(+)
>
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index eaddbd39c29b..1feba29abf95 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -388,6 +388,44 @@ static void vhost_vq_reset(struct vhost_dev *dev,
>         __vhost_vq_meta_reset(vq);
>  }
>
> +static int vhost_run_work_kthread_list(void *data)
> +{
> +       struct vhost_worker *worker = data;
> +       struct vhost_work *work, *work_next;
> +       struct vhost_dev *dev = worker->dev;
> +       struct llist_node *node;
> +
> +       kthread_use_mm(dev->mm);
> +
> +       for (;;) {
> +               /* mb paired w/ kthread_stop */
> +               set_current_state(TASK_INTERRUPTIBLE);
> +
> +               if (kthread_should_stop()) {
> +                       __set_current_state(TASK_RUNNING);
> +                       break;
> +               }
> +               node = llist_del_all(&worker->work_list);
> +               if (!node)
> +                       schedule();
> +
> +               node = llist_reverse_order(node);
> +               /* make sure flag is seen after deletion */
> +               smp_wmb();
> +               llist_for_each_entry_safe(work, work_next, node, node) {
> +                       clear_bit(VHOST_WORK_QUEUED, &work->flags);
> +                       __set_current_state(TASK_RUNNING);
> +                       kcov_remote_start_common(worker->kcov_handle);
> +                       work->fn(work);
> +                       kcov_remote_stop();
> +                       cond_resched();
> +               }
> +       }
> +       kthread_unuse_mm(dev->mm);
> +
> +       return 0;
> +}
> +
>  static bool vhost_run_work_list(void *data)
>  {
>         struct vhost_worker *worker = data;
> --
> 2.45.0
>

Patch

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index eaddbd39c29b..1feba29abf95 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -388,6 +388,44 @@  static void vhost_vq_reset(struct vhost_dev *dev,
 	__vhost_vq_meta_reset(vq);
 }
 
+static int vhost_run_work_kthread_list(void *data)
+{
+	struct vhost_worker *worker = data;
+	struct vhost_work *work, *work_next;
+	struct vhost_dev *dev = worker->dev;
+	struct llist_node *node;
+
+	kthread_use_mm(dev->mm);
+
+	for (;;) {
+		/* mb paired w/ kthread_stop */
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (kthread_should_stop()) {
+			__set_current_state(TASK_RUNNING);
+			break;
+		}
+		node = llist_del_all(&worker->work_list);
+		if (!node)
+			schedule();
+
+		node = llist_reverse_order(node);
+		/* make sure flag is seen after deletion */
+		smp_wmb();
+		llist_for_each_entry_safe(work, work_next, node, node) {
+			clear_bit(VHOST_WORK_QUEUED, &work->flags);
+			__set_current_state(TASK_RUNNING);
+			kcov_remote_start_common(worker->kcov_handle);
+			work->fn(work);
+			kcov_remote_stop();
+			cond_resched();
+		}
+	}
+	kthread_unuse_mm(dev->mm);
+
+	return 0;
+}
+
 static bool vhost_run_work_list(void *data)
 {
 	struct vhost_worker *worker = data;
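
For completeness, the consumer loop added above pairs with vhost's existing queueing path, which publishes the work on worker->work_list and then wakes the worker. A rough sketch of that producer side for a kthread-backed worker (the worker->kthread_task field is an assumption; this patch adds only the consumer loop):

static void vhost_work_queue_kthread(struct vhost_worker *worker,
				     struct vhost_work *work)
{
	/* Illustrative sketch; mirrors the existing vhost queueing logic.
	 * worker->kthread_task is a hypothetical field, not part of this patch.
	 */
	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* test_and_set_bit() implies a full barrier, so the QUEUED
		 * flag is visible before the node appears on the llist.
		 */
		llist_add(&work->node, &worker->work_list);
		/* Kick the kthread sleeping in schedule() in the loop above. */
		wake_up_process(worker->kthread_task);
	}
}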