--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -44,6 +44,18 @@ static ssize_t fuse_conn_abort_write(struct file *file, const char __user *buf,
return count;
}
+static ssize_t fuse_conn_resend_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
+
+ if (fc) {
+ fuse_resend_pqueue(fc);
+ fuse_conn_put(fc);
+ }
+ return count;
+}
+
static ssize_t fuse_conn_waiting_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
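
The write handler above never inspects the written bytes; any write on the new control file simply looks up the connection and calls fuse_resend_pqueue(). As a rough userspace sketch (not part of the patch), a failover daemon could trigger the resend like this, assuming fusectl is mounted at the usual /sys/fs/fuse/connections and the connection's directory name (its device id) is already known; trigger_resend() is a hypothetical helper name:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper: ask the kernel to re-queue the connection's lost requests. */
static int trigger_resend(unsigned int conn_id)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/fs/fuse/connections/%u/resend", conn_id);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* The written data is never inspected; the write itself is the trigger. */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
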
@@ -190,6 +202,12 @@ static const struct file_operations fuse_ctl_abort_ops = {
.llseek = no_llseek,
};
+static const struct file_operations fuse_ctl_resend_ops = {
+ .open = nonseekable_open,
+ .write = fuse_conn_resend_write,
+ .llseek = no_llseek,
+};
+
static const struct file_operations fuse_ctl_waiting_ops = {
.open = nonseekable_open,
.read = fuse_conn_waiting_read,
@@ -274,6 +292,8 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
NULL, &fuse_ctl_waiting_ops) ||
!fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200, 1,
NULL, &fuse_ctl_abort_ops) ||
+ !fuse_ctl_add_dentry(parent, fc, "resend", S_IFREG | 0200, 1,
+ NULL, &fuse_ctl_resend_ops) ||
!fuse_ctl_add_dentry(parent, fc, "max_background", S_IFREG | 0600,
1, NULL, &fuse_conn_max_background_ops) ||
!fuse_ctl_add_dentry(parent, fc, "congestion_threshold",
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2223,6 +2223,65 @@ int fuse_dev_release(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
+/*
+ * Resending all processing queue requests.
+ *
+ * In the event of a FUSE daemon panic and failover, we aim to minimize the
+ * impact on applications by reusing the existing FUSE connection. During this
+ * process, another daemon is employed to preserve the FUSE connection's file
+ * descriptor.
+ *
+ * However, it is possible for some inflight requests to be lost and never
+ * returned. As a result, applications awaiting replies would become stuck
+ * forever. To address this, we can resend these pending requests to the FUSE
+ * daemon, ensuring they are properly processed again.
+ *
+ * Please note that this strategy is applicable only to idempotent requests or
+ * if the FUSE daemon takes careful measures to avoid processing duplicated
+ * non-idempotent requests.
+ */
+void fuse_resend_pqueue(struct fuse_conn *fc)
+{
+ struct fuse_dev *fud;
+ struct fuse_req *req, *next;
+ struct fuse_iqueue *fiq = &fc->iq;
+ LIST_HEAD(to_queue);
+ unsigned int i;
+
+ spin_lock(&fc->lock);
+ if (!fc->connected) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+
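+ /* fc->lock keeps fc->devices stable while each device's queues are scanned */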
+ list_for_each_entry(fud, &fc->devices, entry) {
+ struct fuse_pqueue *fpq = &fud->pq;
+
+ spin_lock(&fpq->lock);
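+ /* fpq->io holds requests currently being copied to/from the daemon; any
+ * locked for an in-progress copy (FR_LOCKED) cannot be moved and are skipped */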
+ list_for_each_entry_safe(req, next, &fpq->io, list) {
+ spin_lock(&req->waitq.lock);
+ if (!test_bit(FR_LOCKED, &req->flags)) {
+ __fuse_get_request(req);
+ list_move(&req->list, &to_queue);
+ }
+ spin_unlock(&req->waitq.lock);
+ }
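+ /* Requests the daemon has read but not yet answered sit on the hashed
+ * processing lists; collect all of them for re-queueing */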
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ list_splice_tail_init(&fpq->processing[i], &to_queue);
+ spin_unlock(&fpq->lock);
+ }
+ spin_unlock(&fc->lock);
+
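+ /* Re-mark as pending so fuse_dev_do_read() will hand them back to the daemon */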
+ list_for_each_entry_safe(req, next, &to_queue, list) {
+ __set_bit(FR_PENDING, &req->flags);
+ }
+
+ spin_lock(&fiq->lock);
+ /* iq and pq requests are both oldest to newest */
+ list_splice(&to_queue, &fiq->pending);
+ fiq->ops->wake_pending_and_unlock(fiq);
+}
+
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
struct fuse_dev *fud = fuse_get_dev(file);
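
The block comment in the hunk above assumes a helper process that keeps the /dev/fuse file descriptor open while the crashed daemon is replaced. As a hedged illustration of that strategy (not part of the patch; the helper/daemon split and the name receive_fuse_fd() are assumptions), the new daemon might take over the preserved descriptor via SCM_RIGHTS and only then poke the "resend" control file:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Hypothetical failover step: receive the still-open /dev/fuse fd that a
 * helper process kept alive, passed over a unix domain socket. */
static int receive_fuse_fd(int unix_sock)
{
	char byte;
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} ctrl;
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl.buf,
		.msg_controllen = sizeof(ctrl.buf),
	};
	struct cmsghdr *cmsg;
	int fd = -1;

	if (recvmsg(unix_sock, &msg, 0) <= 0)
		return -1;

	cmsg = CMSG_FIRSTHDR(&msg);
	if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
	    cmsg->cmsg_type == SCM_RIGHTS)
		memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));

	/* With the session fd recovered, the daemon would write to the
	 * connection's "resend" file (see the earlier sketch) and then
	 * resume its normal /dev/fuse read/reply loop. */
	return fd;
}
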
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -45,7 +45,7 @@
#define FUSE_NAME_MAX 1024
/** Number of dentries for each connection in the control filesystem */
-#define FUSE_CTL_NUM_DENTRIES 5
+#define FUSE_CTL_NUM_DENTRIES 6
/** List of active connections */
extern struct list_head fuse_conn_list;
@@ -1122,6 +1122,9 @@ void fuse_request_end(struct fuse_req *req);
void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);
+/* Resend all requests in the processing queue so they are re-presented to userspace */
+void fuse_resend_pqueue(struct fuse_conn *fc);
+
/**
* Invalidate inode attributes
*/