
[RFC,v3,07/17] fuse: {uring} Add a dev_release exception for fuse-over-io-uring

Message ID 20240901-b4-fuse-uring-rfcv3-without-mmap-v3-7-9207f7391444@ddn.com
State New
Series fuse: fuse-over-io-uring

Commit Message

Bernd Schubert Sept. 1, 2024, 1:37 p.m. UTC
fuse-over-io-uring needs an implicit device clone, which is done per
queue, to avoid a hanging "umount" when the daemon side has already
terminated. The reason is that fuse_dev_release() is not called while
there are still queued (waiting) io_uring commands.
The solution is the implicit device clone plus an exception in
fuse_dev_release() for uring devices: abort the connection when only
uring devices are left.

Signed-off-by: Bernd Schubert <bschubert@ddn.com>
---
 fs/fuse/dev.c         | 34 ++++++++++++++++++++++++++++++++--
 fs/fuse/dev_uring_i.h | 24 +++++++++++++++++-------
 2 files changed, 49 insertions(+), 9 deletions(-)
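
For illustration, here is a small, self-contained user-space sketch of the
decision this patch adds to fuse_dev_release(). The helper name
should_abort_conn() and the flat device array are made up for the example;
the real code walks fc->devices under fc->lock and checks fud->ring_q.

/*
 * Simplified sketch: abort the connection when the closing device is
 * the last one, or when only io_uring ring devices (which never see a
 * ->release()) would remain.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dev_entry {
	bool is_uring;	/* stand-in for fud->ring_q != NULL */
};

static bool should_abort_conn(const struct dev_entry *devs, size_t ndevs,
			      const struct dev_entry *closing)
{
	bool all_uring = true;
	size_t remaining = 0;

	for (size_t i = 0; i < ndevs; i++) {
		if (&devs[i] == closing)
			continue;
		remaining++;
		if (!devs[i].is_uring)
			all_uring = false;
	}

	return remaining == 0 || all_uring;
}

int main(void)
{
	struct dev_entry devs[] = { { .is_uring = true }, { .is_uring = false } };

	/* Closing the one non-uring device leaves only uring devices: abort. */
	printf("%d\n", should_abort_conn(devs, 2, &devs[1]));	/* prints 1 */
	return 0;
}
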

Patch

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 06ea4dc5ffe1..fec995818a9e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2258,6 +2258,8 @@  int fuse_dev_release(struct inode *inode, struct file *file)
 		struct fuse_pqueue *fpq = &fud->pq;
 		LIST_HEAD(to_end);
 		unsigned int i;
+		int dev_cnt;
+		bool abort_conn = false;
 
 		spin_lock(&fpq->lock);
 		WARN_ON(!list_empty(&fpq->io));
@@ -2267,8 +2269,36 @@  int fuse_dev_release(struct inode *inode, struct file *file)
 
 		fuse_dev_end_requests(&to_end);
 
-		/* Are we the last open device? */
-		if (atomic_dec_and_test(&fc->dev_count)) {
+		/* Are we the last open device?  */
+		dev_cnt = atomic_dec_return(&fc->dev_count);
+		if (dev_cnt == 0)
+			abort_conn = true;
+
+#ifdef CONFIG_FUSE_IO_URING
+		/*
+		 * Or is this an io_uring connection with only ring
+		 * devices left?  These devices will not get a ->release()
+		 * as long as there are io_uring_cmd's still waiting and
+		 * not yet completed with io_uring_cmd_done.
+		 */
+		if (fuse_uring_configured(fc)) {
+			struct fuse_dev *list_dev;
+			bool all_uring = true;
+
+			spin_lock(&fc->lock);
+			list_for_each_entry(list_dev, &fc->devices, entry) {
+				if (list_dev == fud)
+					continue;
+				if (!list_dev->ring_q)
+					all_uring = false;
+			}
+			spin_unlock(&fc->lock);
+			if (all_uring)
+				abort_conn = true;
+		}
+#endif
+
+		if (abort_conn) {
 			WARN_ON(fc->iq.fasync != NULL);
 			fuse_abort_conn(fc);
 		}
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
index 301b37d16506..26266f923321 100644
--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h
@@ -15,10 +15,10 @@ 
 #define FUSE_URING_MAX_QUEUE_DEPTH 32768
 
 enum fuse_ring_req_state {
+	FRRS_INVALID = 0,
 
 	/* request is basially initialized */
-	FRRS_INIT = 1,
-
+	FRRS_INIT,
 };
 
 /* A fuse ring entry, part of the ring queue */
@@ -29,11 +29,8 @@  struct fuse_ring_ent {
 	/* array index in the ring-queue */
 	unsigned int tag;
 
-	/*
-	 * state the request is currently in
-	 * (enum fuse_ring_req_state)
-	 */
-	unsigned long state;
+	/* state the request is currently in */
+	enum fuse_ring_req_state state;
 };
 
 struct fuse_ring_queue {
@@ -108,6 +105,14 @@  fuse_uring_get_queue(struct fuse_ring *ring, int qid)
 	return (struct fuse_ring_queue *)(ptr + qid * ring->queue_size);
 }
 
+static inline bool fuse_uring_configured(struct fuse_conn *fc)
+{
+	if (fc->ring != NULL)
+		return true;
+
+	return false;
+}
+
 #else /* CONFIG_FUSE_IO_URING */
 
 struct fuse_ring;
@@ -121,6 +126,11 @@  static inline void fuse_uring_conn_destruct(struct fuse_conn *fc)
 {
 }
 
+static inline bool fuse_uring_configured(struct fuse_conn *fc)
+{
+	return false;
+}
+
 #endif /* CONFIG_FUSE_IO_URING */
 
 #endif /* _FS_FUSE_DEV_URING_I_H */
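
A side note on the dev_uring_i.h hunk: adding FRRS_INVALID = 0 and making the
state field a typed enum presumably guarantees that a zero-initialized ring
entry starts in an explicitly invalid state rather than looking like
FRRS_INIT. A minimal user-space sketch of that property (the
zero-initialization assumption is mine, not stated in the patch):

/*
 * Reserving 0 as FRRS_INVALID means a zeroed entry (assumption:
 * entries are allocated zeroed, e.g. kzalloc-style) reports an
 * explicitly invalid state.
 */
#include <stdio.h>
#include <string.h>

enum fuse_ring_req_state {
	FRRS_INVALID = 0,

	/* request is basically initialized */
	FRRS_INIT,
};

struct fuse_ring_ent {
	unsigned int tag;
	enum fuse_ring_req_state state;
};

int main(void)
{
	struct fuse_ring_ent ent;

	memset(&ent, 0, sizeof(ent));	/* stand-in for kzalloc() */
	printf("zeroed entry is FRRS_INVALID: %d\n",
	       ent.state == FRRS_INVALID);	/* prints 1 */
	return 0;
}
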