diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3111,6 +3111,10 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
}

file->private_data = ctrl;
+
+ if (ctrl->ops->mmap_file_open)
+ ctrl->ops->mmap_file_open(ctrl, file);
+
return 0;
}

@@ -3124,12 +3128,23 @@ static int nvme_dev_release(struct inode *inode, struct file *file)
return 0;
}

+static int nvme_dev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct nvme_ctrl *ctrl = file->private_data;
+
+ if (!ctrl->ops->mmap_cmb)
+ return -ENODEV;
+
+ return ctrl->ops->mmap_cmb(ctrl, vma);
+}
+
static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
.release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .mmap = nvme_dev_mmap,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -494,6 +494,8 @@ struct nvme_ctrl_ops {
void (*delete_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
+ void (*mmap_file_open)(struct nvme_ctrl *ctrl, struct file *file);
+ int (*mmap_cmb)(struct nvme_ctrl *ctrl, struct vm_area_struct *vma);
};

/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2896,6 +2896,22 @@ static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
return dma_pci_p2pdma_supported(dev->dev);
}

+static void nvme_pci_mmap_file_open(struct nvme_ctrl *ctrl,
+ struct file *file)
+{
+ struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
+
+ pci_p2pdma_mmap_file_open(pdev, file);
+}
+
+static int nvme_pci_mmap_cmb(struct nvme_ctrl *ctrl,
+ struct vm_area_struct *vma)
+{
+ struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
+
+ return pci_mmap_p2pmem(pdev, vma);
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
@@ -2907,6 +2923,8 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.submit_async_event = nvme_pci_submit_async_event,
.get_address = nvme_pci_get_address,
.supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
+ .mmap_file_open = nvme_pci_mmap_file_open,
+ .mmap_cmb = nvme_pci_mmap_cmb,
};

static int nvme_dev_map(struct nvme_dev *dev)
Allow userspace to obtain CMB memory by mmapping the controller's
char device. The mmap call allocates and returns a chunk of CMB
memory (the offset is ignored), so userspace does not have control
over the address within the CMB.

A VMA allocated in this way will only be usable by drivers that set
FOLL_PCI_P2PDMA when calling GUP, and inter-device support will be
checked the first time the pages are mapped for DMA. Currently this
is only supported by O_DIRECT to a PCI NVMe device or through the
NVMe passthrough IOCTL.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
---
 drivers/nvme/host/core.c | 15 +++++++++++++++
 drivers/nvme/host/nvme.h |  2 ++
 drivers/nvme/host/pci.c  | 18 ++++++++++++++++++
 3 files changed, 35 insertions(+)
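For reference (not part of the patch), a minimal userspace sketch of the
intended flow. The device nodes /dev/nvme0 and /dev/nvme0n1 and the 2 MiB
mapping length are illustrative assumptions; error handling is abbreviated.

	#define _GNU_SOURCE		/* for O_DIRECT */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		const size_t len = 2 * 1024 * 1024;	/* assumed to fit in the CMB */
		int ctrl_fd, blk_fd;
		void *cmb;

		/* Controller character device (node name is an example). */
		ctrl_fd = open("/dev/nvme0", O_RDWR);
		if (ctrl_fd < 0) {
			perror("open /dev/nvme0");
			return 1;
		}

		/*
		 * The driver allocates a chunk of CMB memory and ignores the
		 * offset, so userspace cannot choose the address within the CMB.
		 */
		cmb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			   ctrl_fd, 0);
		if (cmb == MAP_FAILED) {
			perror("mmap CMB");
			return 1;
		}

		/*
		 * Use the mapping as an O_DIRECT buffer against a namespace
		 * block device (example path). mmap returns a page-aligned
		 * address, satisfying O_DIRECT's alignment requirement.
		 */
		memset(cmb, 0, len);
		blk_fd = open("/dev/nvme0n1", O_WRONLY | O_DIRECT);
		if (blk_fd < 0) {
			perror("open /dev/nvme0n1");
			return 1;
		}
		if (write(blk_fd, cmb, len) != (ssize_t)len)
			perror("O_DIRECT write from CMB");

		close(blk_fd);
		munmap(cmb, len);
		close(ctrl_fd);
		return 0;
	}

Per the commit message, GUP on such a VMA only succeeds for callers that
pass FOLL_PCI_P2PDMA, so the buffer is currently only usable for O_DIRECT
to a PCI NVMe device or through the NVMe passthrough IOCTL.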