@@ -39,6 +39,7 @@
#ifdef __linux__
#include <linux/fs.h>
+#include <linux/fuse.h>
#endif
/* Prevent overly long bounce buffer allocations */
@@ -49,7 +50,6 @@ typedef struct FuseExport {
BlockExport common;
struct fuse_session *fuse_session;
- struct fuse_buf fuse_buf;
unsigned int in_flight; /* atomic */
bool mounted, fd_handler_set_up;
@@ -64,6 +64,14 @@ typedef struct FuseExport {
gid_t st_gid;
} FuseExport;
+typedef struct FuseIORequest {
+ fuse_req_t req;
+ size_t size;
+ off_t offset;
+ FuseExport *exp;
+ char *write_buf; /* copied write payload; NULL for reads */
+} FuseIORequest;
+
static GHashTable *exports;
static const struct fuse_lowlevel_ops fuse_ops;
@@ -288,6 +296,7 @@ fail:
static void read_from_fuse_export(void *opaque)
{
FuseExport *exp = opaque;
+ struct fuse_buf buf = {};
int ret;
blk_exp_ref(&exp->common);
@@ -295,20 +304,30 @@ static void read_from_fuse_export(void *opaque)
qatomic_inc(&exp->in_flight);
do {
- ret = fuse_session_receive_buf(exp->fuse_session, &exp->fuse_buf);
+ ret = fuse_session_receive_buf(exp->fuse_session, &buf);
} while (ret == -EINTR);
if (ret < 0) {
goto out;
}
- fuse_session_process_buf(exp->fuse_session, &exp->fuse_buf);
+ fuse_session_process_buf(exp->fuse_session, &buf);
+
+ /*
+ * Read and write requests are handed off to a coroutine, which drops
+ * the in-flight count and the export reference itself; for them, only
+ * the receive buffer has to be released here.
+ */
+ if (ret > 0) {
+ struct fuse_in_header *in = (struct fuse_in_header *)buf.mem;
+
+ if (in->opcode == FUSE_READ || in->opcode == FUSE_WRITE) {
+ g_free(buf.mem);
+ return;
+ }
+ }
+
out:
if (qatomic_fetch_dec(&exp->in_flight) == 1) {
aio_wait_kick(); /* wake AIO_WAIT_WHILE() */
}
blk_exp_unref(&exp->common);
+
+ g_free(buf.mem);
}
static void fuse_export_shutdown(BlockExport *blk_exp)
@@ -347,7 +366,6 @@ static void fuse_export_delete(BlockExport *blk_exp)
fuse_session_destroy(exp->fuse_session);
}
- free(exp->fuse_buf.mem);
g_free(exp->mountpoint);
}
@@ -570,102 +588,159 @@ static void fuse_open(fuse_req_t req, fuse_ino_t inode,
fuse_reply_open(req, fi);
}
-/**
- * Handle client reads from the exported image.
- */
-static void fuse_read(fuse_req_t req, fuse_ino_t inode,
- size_t size, off_t offset, struct fuse_file_info *fi)
+static void coroutine_fn fuse_read_coroutine(void *opaque)
{
- FuseExport *exp = fuse_req_userdata(req);
+ FuseIORequest *io_req = opaque;
+ FuseExport *exp = io_req->exp;
int64_t length;
- void *buf;
+ void *buffer;
int ret;
/* Limited by max_read, should not happen */
- if (size > FUSE_MAX_BOUNCE_BYTES) {
- fuse_reply_err(req, EINVAL);
- return;
+ if (io_req->size > FUSE_MAX_BOUNCE_BYTES) {
+ fuse_reply_err(io_req->req, EINVAL);
+ goto cleanup;
}
/**
* Clients will expect short reads at EOF, so we have to limit
* offset+size to the image length.
*/
length = blk_getlength(exp->common.blk);
if (length < 0) {
- fuse_reply_err(req, -length);
- return;
+ fuse_reply_err(io_req->req, -length);
+ goto cleanup;
}
- if (offset + size > length) {
- size = length - offset;
+ if (io_req->offset + io_req->size > length) {
+ io_req->size = length - io_req->offset;
}
- buf = qemu_try_blockalign(blk_bs(exp->common.blk), size);
- if (!buf) {
- fuse_reply_err(req, ENOMEM);
- return;
+ if (io_req->size == 0) {
+ fuse_reply_buf(io_req->req, NULL, 0);
+ goto cleanup;
}
- ret = blk_pread(exp->common.blk, offset, size, buf, 0);
+ buffer = qemu_try_blockalign(blk_bs(exp->common.blk), io_req->size);
+ if (!buffer) {
+ fuse_reply_err(io_req->req, ENOMEM);
+ goto cleanup;
+ }
+
+ ret = blk_co_pread(exp->common.blk, io_req->offset,
+ io_req->size, buffer, 0);
if (ret >= 0) {
- fuse_reply_buf(req, buf, size);
+ fuse_reply_buf(io_req->req, buffer, io_req->size);
} else {
- fuse_reply_err(req, -ret);
+ fuse_reply_err(io_req->req, -ret);
+ }
+
+ qemu_vfree(buffer);
+
+cleanup:
+ if (qatomic_fetch_dec(&exp->in_flight) == 1) {
+ aio_wait_kick(); /* wake AIO_WAIT_WHILE() */
}
- qemu_vfree(buf);
+ blk_exp_unref(&exp->common);
+
+ g_free(io_req);
}
-/**
- * Handle client writes to the exported image.
- */
-static void fuse_write(fuse_req_t req, fuse_ino_t inode, const char *buf,
- size_t size, off_t offset, struct fuse_file_info *fi)
+static void coroutine_fn fuse_write_coroutine(void *opaque)
{
- FuseExport *exp = fuse_req_userdata(req);
+ FuseIORequest *io_req = opaque;
+ FuseExport *exp = io_req->exp;
int64_t length;
int ret;
/* Limited by max_write, should not happen */
- if (size > BDRV_REQUEST_MAX_BYTES) {
- fuse_reply_err(req, EINVAL);
- return;
+ if (io_req->size > BDRV_REQUEST_MAX_BYTES) {
+ fuse_reply_err(io_req->req, EINVAL);
+ goto cleanup;
}
if (!exp->writable) {
- fuse_reply_err(req, EACCES);
- return;
+ fuse_reply_err(io_req->req, EACCES);
+ goto cleanup;
}
/**
* Clients will expect short writes at EOF, so we have to limit
* offset+size to the image length.
*/
length = blk_getlength(exp->common.blk);
if (length < 0) {
- fuse_reply_err(req, -length);
- return;
+ fuse_reply_err(io_req->req, -length);
+ goto cleanup;
}
- if (offset + size > length) {
+ if (io_req->offset + io_req->size > length) {
if (exp->growable) {
- ret = fuse_do_truncate(exp, offset + size, true, PREALLOC_MODE_OFF);
+ ret = fuse_do_truncate(exp, io_req->offset + io_req->size,
+ true, PREALLOC_MODE_OFF);
if (ret < 0) {
- fuse_reply_err(req, -ret);
- return;
+ fuse_reply_err(io_req->req, -ret);
+ goto cleanup;
}
} else {
- size = length - offset;
+ io_req->size = MAX(0, length - io_req->offset);
+ if (io_req->size == 0) {
+ fuse_reply_write(io_req->req, 0);
+ goto cleanup;
+ }
}
}
- ret = blk_pwrite(exp->common.blk, offset, size, buf, 0);
+ ret = blk_co_pwrite(exp->common.blk, io_req->offset, io_req->size,
+ io_req->write_buf, 0);
if (ret >= 0) {
- fuse_reply_write(req, size);
+ fuse_reply_write(io_req->req, io_req->size);
} else {
- fuse_reply_err(req, -ret);
+ fuse_reply_err(io_req->req, -ret);
+ }
+
+cleanup:
+ if (qatomic_fetch_dec(&exp->in_flight) == 1) {
+ aio_wait_kick(); /* wake AIO_WAIT_WHILE() */
}
+
+ blk_exp_unref(&exp->common);
+
+ g_free(io_req->write_buf);
+ g_free(io_req);
+}
+
+/**
+ * Handle client reads from the exported image.
+ */
+static void fuse_read(fuse_req_t req, fuse_ino_t inode,
+ size_t size, off_t offset, struct fuse_file_info *fi)
+{
+ FuseExport *exp = fuse_req_userdata(req);
+ FuseIORequest *io_req = g_new(FuseIORequest, 1);
+
+ *io_req = (FuseIORequest) {
+ .req = req,
+ .size = size,
+ .offset = offset,
+ .exp = exp,
+ };
+
+ Coroutine *co = qemu_coroutine_create(fuse_read_coroutine, io_req);
+ qemu_coroutine_enter(co);
+}
+
+/**
+ * Handle client writes to the exported image.
+ */
+static void fuse_write(fuse_req_t req, fuse_ino_t inode, const char *buf,
+ size_t size, off_t offset, struct fuse_file_info *fi)
+{
+ FuseExport *exp = fuse_req_userdata(req);
+ FuseIORequest *io_req = g_new(FuseIORequest, 1);
+
+ *io_req = (FuseIORequest) {
+ .req = req,
+ .size = size,
+ .offset = offset,
+ .exp = exp,
+ .write_buf = g_memdup2_qemu(buf, size),
+ };
+
+ Coroutine *co = qemu_coroutine_create(fuse_write_coroutine, io_req);
+ qemu_coroutine_enter(co);
}
/**
This commit refactors the FUSE export to process read and write
operations in coroutines, improving concurrency and avoiding blocking
the main loop. The main changes include:

1. Introduce a FuseIORequest structure to encapsulate I/O parameters
   and state
2. Move read/write processing into the coroutines fuse_read_coroutine()
   and fuse_write_coroutine()
3. Use blk_co_pread()/blk_co_pwrite() for asynchronous block-layer
   access

Signed-off-by: Changzhi Xie <sa.z@qq.com>
---
 block/export/fuse.c | 189 +++++++++++++++++++++++++++++++-------------
 1 file changed, 132 insertions(+), 57 deletions(-)
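For reference, the two new handlers follow the same dispatch pattern, so
they could share a single helper. The sketch below is a hypothetical
refactoring, not part of this patch; fuse_dispatch_io() is an invented
name, and the sketch only relies on calls that already appear above
(fuse_req_userdata(), g_new(), g_memdup2_qemu(), qemu_coroutine_create()
and qemu_coroutine_enter()) plus QEMU's CoroutineEntry typedef. Like the
patch, it assumes the coroutine releases the in-flight count and export
reference taken in read_from_fuse_export().

/*
 * Hypothetical helper (not part of this patch): pack the request state
 * into a heap-allocated FuseIORequest and hand it to the given
 * coroutine.  The coroutine owns io_req afterwards and is responsible
 * for dropping the in-flight count and the export reference.
 */
static void fuse_dispatch_io(fuse_req_t req, size_t size, off_t offset,
                             const char *write_buf, CoroutineEntry *entry)
{
    FuseIORequest *io_req = g_new(FuseIORequest, 1);

    *io_req = (FuseIORequest) {
        .req = req,
        .size = size,
        .offset = offset,
        .exp = fuse_req_userdata(req),
        /* libfuse reuses its receive buffer, so write data is copied */
        .write_buf = write_buf ? g_memdup2_qemu(write_buf, size) : NULL,
    };

    qemu_coroutine_enter(qemu_coroutine_create(entry, io_req));
}

With such a helper, fuse_read() reduces to
fuse_dispatch_io(req, size, offset, NULL, fuse_read_coroutine) and
fuse_write() to fuse_dispatch_io(req, size, offset, buf,
fuse_write_coroutine).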