[09/13] fuse: Add wait stop ioctl support to the ring

Message ID 20230321011047.3425786-10-bschubert@ddn.com (mailing list archive)
State Mainlined, archived
Series fuse uring communication

Commit Message

Bernd Schubert March 21, 2023, 1:10 a.m. UTC
This adds an optional ioctl to avoid running the stop monitor
(delayed workq) in intervals at run time - saving cpu cycles.
When the FUSE_DEV_IOC_URING ioctl with subcommand
FUSE_URING_IOCTL_CMD_WAIT is received, it cancels the stop monitor
(delayed workq) and then sleeps on an interruptible waitq - on
process termination the thread gets woken up and schedules the stop
monitor again.
As the submitting thread waits indefinitely on the waitq, the
userspace daemon has to create a separate thread for this ioctl.

The additional ioctl subcommand FUSE_URING_IOCTL_CMD_STOP exists to
let userspace explicitly initiate fuse uring shutdown and to wake up
the thread waiting in FUSE_URING_IOCTL_CMD_WAIT.
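
A rough userspace sketch of how a daemon might drive the two
subcommands (illustrative only: it assumes the struct fuse_uring_cfg
layout and FUSE_DEV_IOC_URING ioctl number introduced earlier in this
series, dev_fd is the already opened /dev/fuse fd, and the function
names are made up):

  #include <string.h>
  #include <sys/ioctl.h>

  /* Meant to run in a dedicated daemon thread */
  static void *uring_wait_thread(void *arg)
  {
          int dev_fd = *(int *)arg;
          struct fuse_uring_cfg cfg;

          memset(&cfg, 0, sizeof(cfg));
          cfg.cmd = FUSE_URING_IOCTL_CMD_WAIT;

          /* Parks in the kernel until shutdown or connection teardown */
          ioctl(dev_fd, FUSE_DEV_IOC_URING, &cfg);
          return NULL;
  }

  /* Called from the daemon's shutdown path */
  static void uring_request_stop(int dev_fd)
  {
          struct fuse_uring_cfg cfg;

          memset(&cfg, 0, sizeof(cfg));
          cfg.cmd = FUSE_URING_IOCTL_CMD_STOP;

          /* Wakes the thread blocked in FUSE_URING_IOCTL_CMD_WAIT */
          ioctl(dev_fd, FUSE_DEV_IOC_URING, &cfg);
  }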

Signed-off-by: Bernd Schubert <bschubert@ddn.com>
cc: Miklos Szeredi <miklos@szeredi.hu>
cc: linux-fsdevel@vger.kernel.org
cc: Amir Goldstein <amir73il@gmail.com>
cc: fuse-devel@lists.sourceforge.net
---
 fs/fuse/dev_uring.c | 47 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

Patch

diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index ade341d86c03..e19c652e7071 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -425,6 +425,49 @@  static int fuse_uring_cfg(struct fuse_conn *fc, unsigned int qid,
 	return rc;
 }
 
+/**
+ * Wait until ring shutdown is requested, then let the stop monitor release resources
+ */
+static int fuse_uring_wait_stop(struct fuse_conn *fc)
+{
+	struct fuse_iqueue *fiq = &fc->iq;
+
+	pr_devel("%s stop_requested=%d\n", __func__, fc->ring.stop_requested);
+
+	if (fc->ring.stop_requested)
+		return -EINTR;
+
+	/* This userspace thread can stop uring on process stop, no need
+	 * for the interval worker
+	 */
+	pr_devel("%s cancel stop monitor\n", __func__);
+	cancel_delayed_work_sync(&fc->ring.stop_monitor);
+
+	wait_event_interruptible(fc->ring.stop_waitq,
+				 !fiq->connected ||
+				 fc->ring.stop_requested);
+
+	/* The userspace task gets scheduled back to userspace, so we need
+	 * the interval worker again. It runs immediately for quick cleanup
+	 * on shutdown/process kill.
+	 */
+
+	mutex_lock(&fc->ring.start_stop_lock);
+	if (!fc->ring.queues_stopped)
+		mod_delayed_work(system_wq, &fc->ring.stop_monitor, 0);
+	mutex_unlock(&fc->ring.start_stop_lock);
+
+	return 0;
+}
+
+static int fuse_uring_shutdown_wakeup(struct fuse_conn *fc)
+{
+	fc->ring.stop_requested = 1;
+	wake_up_all(&fc->ring.stop_waitq);
+
+	return 0;
+}
+
 int fuse_uring_ioctl(struct file *file, struct fuse_uring_cfg *cfg)
 {
 	struct fuse_dev *fud = fuse_get_dev(file);
@@ -443,6 +486,10 @@  int fuse_uring_ioctl(struct file *file, struct fuse_uring_cfg *cfg)
 	switch (cfg->cmd) {
 	case FUSE_URING_IOCTL_CMD_QUEUE_CFG:
 		return fuse_uring_cfg(fc, cfg->qid, cfg);
+	case FUSE_URING_IOCTL_CMD_WAIT:
+		return fuse_uring_wait_stop(fc);
+	case FUSE_URING_IOCTL_CMD_STOP:
+		return fuse_uring_shutdown_wakeup(fc);
 	default:
 		return -EINVAL;
 	}