@@ -2323,6 +2323,7 @@ const struct file_operations fuse_dev_operations = {
.fasync = fuse_dev_fasync,
.unlocked_ioctl = fuse_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .mmap = fuse_uring_mmap,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
@@ -496,3 +496,57 @@ void fuse_uring_ring_destruct(struct fuse_conn *fc)
fc->ring.queue_depth = 0;
fc->ring.nr_queues = 0;
}
+
+/**
+ * fuse uring mmap, per ring queue. The queue is identified by the offset
+ * parameter
+ */
+int fuse_uring_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct fuse_dev *fud = fuse_get_dev(filp);
+ struct fuse_conn *fc;
+ size_t sz = vma->vm_end - vma->vm_start;
+ unsigned int qid = 0; /* initialized: read by the exit pr_devel on early error paths */
+ int ret;
+ loff_t off;
+ struct fuse_ring_queue *queue;
+
+ /* fuse_get_dev() returns NULL until FUSE_DEV_IOC_CLONE/mount sets it up */
+ if (fud == NULL)
+ return -ENODEV;
+ fc = fud->fc;
+
+ /* check if uring is configured and if the requested size matches */
+ if (fc->ring.nr_queues == 0 || fc->ring.queue_depth == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sz != fc->ring.queue_buf_size) {
+ ret = -EINVAL;
+ pr_devel("mmap size mismatch, expected %zu got %zu\n",
+ fc->ring.queue_buf_size, sz);
+ goto out;
+ }
+
+ /* XXX: Enforce a cloned session per ring and assign fud per queue
+ * and use fud as key to find the right queue?
+ */
+ /* vm_pgoff is already in units of pages; shifting it up by PAGE_SHIFT
+ * and dividing by PAGE_SIZE would be an identity transform that can
+ * overflow on 32-bit, so use it directly.
+ */
+ off = vma->vm_pgoff;
+ qid = off / fc->ring.queue_depth;
+
+ queue = fuse_uring_get_queue(fc, qid);
+ if (queue == NULL) {
+ pr_devel("fuse uring mmap: invalid qid=%u\n", qid);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ ret = remap_vmalloc_range(vma, queue->queue_req_buf, 0);
+out:
+ pr_devel("%s: pid %d qid: %u addr: %p sz: %zu ret: %d\n",
+ __func__, current->pid, qid, (char *)vma->vm_start,
+ sz, ret);
+
+ return ret;
+}
@@ -12,6 +12,7 @@
void fuse_uring_end_requests(struct fuse_conn *fc);
int fuse_uring_ioctl(struct file *file, struct fuse_uring_cfg *cfg);
void fuse_uring_ring_destruct(struct fuse_conn *fc);
+int fuse_uring_mmap(struct file *filp, struct vm_area_struct *vma);
#endif
This adds the uring mmap method. Mmap is currently done per ring queue; the queue is identified using the offset parameter. The reason to have an mmap per queue is to allow a NUMA-aware allocation per queue. The trade-off is that the offset limits the number of possible queues (although to a very high number) and it might cause issues if another mmap is needed later on. Signed-off-by: Bernd Schubert <bschubert@ddn.com> cc: Miklos Szeredi <miklos@szeredi.hu> cc: linux-fsdevel@vger.kernel.org cc: Amir Goldstein <amir73il@gmail.com> cc: fuse-devel@lists.sourceforge.net --- fs/fuse/dev.c | 1 + fs/fuse/dev_uring.c | 49 +++++++++++++++++++++++++++++++++++++++++++ fs/fuse/dev_uring_i.h | 1 + 3 files changed, 51 insertions(+)