@@ -2138,9 +2138,23 @@ int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->flags))
+ if (XE_IOCTL_DBG(xe, args->flags &
+ ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER))
return -EINVAL;
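+
+	/*
+	 * With DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER set, this ioctl returns a
+	 * fixed fake offset instead of one tied to a BO: no handle may be
+	 * passed and the resulting mapping is a single 4K page.
+	 */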
+ if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) {
+ if (XE_IOCTL_DBG(xe, args->handle))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
+ return -EINVAL;
+
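+		/*
+		 * The fixed fake offset must stay below the range the vma
+		 * manager hands out for GEM object mmap offsets.
+		 */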
+ BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) +
+ SZ_4K) >= DRM_FILE_PAGE_OFFSET_START);
+ args->offset = XE_PCI_BARRIER_MMAP_OFFSET;
+ return 0;
+ }
+
gem_obj = drm_gem_object_lookup(file, args->handle);
if (XE_IOCTL_DBG(xe, !gem_obj))
return -ENOENT;
@@ -63,6 +63,8 @@
#define XE_BO_PROPS_INVALID (-1)
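+
+/*
+ * Fake mmap offset of the page used as a PCI memory barrier; kept below
+ * DRM_FILE_PAGE_OFFSET_START so it can never collide with a GEM mmap offset.
+ */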
+#define XE_PCI_BARRIER_MMAP_OFFSET (0x50 << XE_PTE_SHIFT)
+
struct sg_table;
struct xe_bo *xe_bo_alloc(void);
@@ -231,12 +231,113 @@ static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
#define xe_drm_compat_ioctl NULL
#endif
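+
+/*
+ * A barrier VMA holds a reference on the drm_device so the fault handler can
+ * detect unplug; take and drop that reference as VMAs are duplicated (fork,
+ * split) or torn down.
+ */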
+static void barrier_open(struct vm_area_struct *vma)
+{
+ drm_dev_get(vma->vm_private_data);
+}
+
+static void barrier_close(struct vm_area_struct *vma)
+{
+ drm_dev_put(vma->vm_private_data);
+}
+
+static void barrier_release_dummy_page(struct drm_device *dev, void *res)
+{
+ struct page *dummy_page = (struct page *)res;
+
+ __free_page(dummy_page);
+}
+
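+/*
+ * While the device is alive, map an uncached page of PCI BAR0 (the last
+ * doorbell page, going by LAST_DB_PAGE_OFFSET) so a CPU write through the
+ * mapping acts as a PCI memory barrier. Once the device is unplugged, map a
+ * zeroed dummy page instead so stale userspace writes stay harmless.
+ */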
+static vm_fault_t barrier_fault(struct vm_fault *vmf)
+{
+ struct drm_device *dev = vmf->vma->vm_private_data;
+ struct vm_area_struct *vma = vmf->vma;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ pgprot_t prot;
+ int idx;
+
+ prot = vm_get_page_prot(vma->vm_flags);
+
+ if (drm_dev_enter(dev, &idx)) {
+ unsigned long pfn;
+
+#define LAST_DB_PAGE_OFFSET 0x7ff001
+ pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
+ LAST_DB_PAGE_OFFSET);
+ ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
+ pgprot_noncached(prot));
+ drm_dev_exit(idx);
+ } else {
+ struct page *page;
+
+		/* Allocate a new dummy page to map the whole VA range of this VMA to it */
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ /* Set the page to be freed using drmm release action */
+ if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
+ return VM_FAULT_OOM;
+
+ ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
+ prot);
+ }
+
+ return ret;
+}
+
+static const struct vm_operations_struct vm_ops_barrier = {
+ .open = barrier_open,
+ .close = barrier_close,
+ .fault = barrier_fault,
+};
+
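+/*
+ * The barrier mapping is a single write-only, non-COW 4K PFN map; a
+ * drm_device reference is held for the VMA's lifetime so barrier_fault() can
+ * tell whether the device is still present.
+ */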
+static int xe_pci_barrier_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+
+ if (vma->vm_end - vma->vm_start > SZ_4K)
+ return -EINVAL;
+
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
+ if (vma->vm_flags & (VM_READ | VM_EXEC))
+ return -EINVAL;
+
+ vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
+ vma->vm_ops = &vm_ops_barrier;
+ vma->vm_private_data = dev;
+ drm_dev_get(vma->vm_private_data);
+
+ return 0;
+}
+
+static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
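+	/* Dispatch the fixed fake offsets first; anything else is a GEM mmap. */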
+ switch (vma->vm_pgoff) {
+ case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
+ return xe_pci_barrier_mmap(filp, vma);
+ }
+
+ return drm_gem_mmap(filp, vma);
+}
+
static const struct file_operations xe_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release_noglobal,
.unlocked_ioctl = xe_drm_ioctl,
- .mmap = drm_gem_mmap,
+ .mmap = xe_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = xe_drm_compat_ioctl,
@@ -811,6 +811,32 @@ struct drm_xe_gem_create {
/**
* struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ *
+ * The @flags can be:
+ * - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For querying the special offset
+ * to pass to mmap() for mapping the PCI barrier page. Writing to the returned
+ * mapping generates a PCI memory barrier with low overhead (it avoids both an
+ * ioctl call and a write to VRAM), acting like an MI_MEM_FENCE instruction.
+ *
+ * Note: The mmap size can be at most 4K, due to HW limitations. As a result,
+ * this interface is only supported on CPU architectures with a 4K page size.
+ * The mmap_offset ioctl detects this and returns an error on other
+ * architectures, in which case userspace is expected to fall back to a
+ * different method of triggering a barrier.
+ *
+ * Roughly the usage would be as follows:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_gem_mmap_offset mmo = {
+ * .handle = 0, // must be set to 0
+ * .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
+ * };
+ *
+ * err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
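+ *
+ *     // a sketch of the expected fallback path: if the ioctl fails (e.g.
+ *     // the CPU page size is not 4K), trigger the barrier some other way;
+ *     // issue_fallback_barrier() is a hypothetical application helper
+ *     if (err)
+ *         return issue_fallback_barrier();
+ *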
+ * map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
+ *     map[i] = 0xdeadbeef; // issue barrier
*/
struct drm_xe_gem_mmap_offset {
/** @extensions: Pointer to the first extension struct, if any */
@@ -819,7 +845,8 @@ struct drm_xe_gem_mmap_offset {
/** @handle: Handle for the object being mapped. */
__u32 handle;
- /** @flags: Must be zero */
+#define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER (1 << 0)
+	/** @flags: Flags; currently only DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER is valid. */
__u32 flags;
/** @offset: The fake offset to use for subsequent mmap call */