@@ -386,6 +386,43 @@ static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
put_unused_fd(fd);
return ret;
}
+
+static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
+ const void __user *user_data)
+{
+ struct dma_buf_import_sync_file arg;
+ struct dma_fence *fence;
+ enum dma_resv_usage usage;
+ int ret = 0;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fence = sync_file_get_fence(arg.fd);
+ if (!fence)
+ return -EINVAL;
+
+ usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
+ DMA_RESV_USAGE_READ;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, 1);
+ if (!ret)
+ dma_resv_add_fence(dmabuf->resv, fence, usage);
+
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_fence_put(fence);
+
+ return ret;
+}
#endif
static long dma_buf_ioctl(struct file *file,
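A userspace-side sketch of the new import path may help make the handler above concrete. It is not part of the patch: it assumes the uapi additions further down are visible through <linux/dma-buf.h>, and that dmabuf_fd and sync_file_fd are supplied by the caller (for example a PRIME-exported buffer fd and a sync_file produced by the GPU driver).

#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#include <errno.h>

static int import_sync_file(int dmabuf_fd, int sync_file_fd, __u32 flags)
{
	struct dma_buf_import_sync_file arg = {
		.flags = flags,		/* DMA_BUF_SYNC_READ, _WRITE, or both */
		.fd = sync_file_fd,	/* fence to attach to the dma-buf */
	};

	/* Mirrors the kernel-side checks above: invalid flags or an fd that
	 * is not a sync_file come back as EINVAL. */
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg) < 0)
		return -errno;

	return 0;
}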
@@ -434,6 +471,8 @@ static long dma_buf_ioctl(struct file *file,
#if IS_ENABLED(CONFIG_SYNC_FILE)
case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
+ case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
+ return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif
default:
@@ -96,6 +96,24 @@ struct dma_buf_sync {
* dma-buf for waiting later instead of waiting immediately. This is
* useful for modern graphics APIs such as Vulkan which assume an explicit
* synchronization model but still need to inter-operate with dma-buf.
+ *
+ * The intended usage pattern is the following:
+ *
+ * 1. Export a sync_file with flags corresponding to the expected GPU usage
+ * via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
+ *
+ * 2. Submit rendering work which uses the dma-buf. The work should wait on
+ * the exported sync_file before rendering and produce another sync_file
+ * when complete.
+ *
+ * 3. Import the rendering-complete sync_file into the dma-buf with flags
+ * corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
+ *
+ * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
+ * the above is not a single atomic operation. If userspace wants to ensure
+ * ordering via these fences, it is the responsibility of userspace to use
+ * locks or other mechanisms to ensure that no other context adds fences or
+ * submits work between steps 1 and 3 above.
*/
struct dma_buf_export_sync_file {
/**
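The three-step pattern laid out in the comment above can be sketched end to end from userspace. This is illustrative only: submit_rendering() is a hypothetical stand-in for whatever submission path the client actually uses (for instance vkQueueSubmit plus the VK_KHR_external_fence_fd / sync_file export path); it is assumed to wait on the given sync_file before writing the buffer and to return a new sync_file that signals when rendering completes.

#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>

/* Hypothetical GPU submission helper, not part of any kernel API. */
extern int submit_rendering(int dmabuf_fd, int wait_fd);

static int render_to_dmabuf(int dmabuf_fd)
{
	struct dma_buf_export_sync_file exp = { .flags = DMA_BUF_SYNC_WRITE };
	struct dma_buf_import_sync_file imp = { .flags = DMA_BUF_SYNC_WRITE };
	int render_done_fd;

	/* 1. Export the fences currently attached to the dma-buf. */
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp) < 0)
		return -errno;

	/* 2. Queue work that waits on exp.fd before writing the buffer and
	 *    produces a new sync_file once the write has finished. */
	render_done_fd = submit_rendering(dmabuf_fd, exp.fd);
	close(exp.fd);
	if (render_done_fd < 0)
		return render_done_fd;

	/* 3. Attach the render-complete fence back to the dma-buf so that
	 *    implicitly synchronized consumers wait for it. */
	imp.fd = render_done_fd;
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp) < 0) {
		close(render_done_fd);
		return -errno;
	}

	close(render_done_fd);
	return 0;
}

As the comment notes, nothing makes these three steps atomic; if other contexts may attach fences to the same dma-buf concurrently, userspace needs its own locking around the whole sequence.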
@@ -119,6 +137,36 @@ struct dma_buf_export_sync_file {
__s32 fd;
};
+/**
+ * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
+ * sync_file into a dma-buf for the purposes of implicit synchronization
+ * with other dma-buf consumers. This allows clients using explicitly
+ * synchronized APIs such as Vulkan to inter-operate with dma-buf consumers
+ * which expect implicit synchronization, such as OpenGL or most media/video
+ * drivers.
+ */
+struct dma_buf_import_sync_file {
+ /**
+ * @flags: Read/write flags
+ *
+ * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+ *
+ * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+ * this inserts the sync_file as a read-only fence. Any subsequent
+ * implicitly synchronized writes to this dma-buf will wait on this
+ * fence but reads will not.
+ *
+ * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
+ * write fence. All subsequent implicitly synchronized access to
+ * this dma-buf will wait on this fence.
+ */
+ __u32 flags;
+ /** @fd: Sync file descriptor */
+ __s32 fd;
+};
+
#define DMA_BUF_BASE 'b'
#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
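As a concrete illustration of the flag semantics documented above (error handling kept minimal, and both sync_file fds assumed to come from earlier GPU submissions): a fence from work that only read the buffer is attached as a read fence, so later implicit writers wait on it while later readers do not; a fence from work that wrote the buffer is attached as a write fence and gates all later implicit access.

#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#include <errno.h>

static int attach_fences(int dmabuf_fd, int sampling_done_fd, int render_done_fd)
{
	struct dma_buf_import_sync_file read_arg = {
		.flags = DMA_BUF_SYNC_READ,	/* only blocks later implicit writers */
		.fd = sampling_done_fd,
	};
	struct dma_buf_import_sync_file write_arg = {
		.flags = DMA_BUF_SYNC_WRITE,	/* blocks all later implicit access */
		.fd = render_done_fd,
	};

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &read_arg) < 0 ||
	    ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &write_arg) < 0)
		return -errno;

	return 0;
}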
@@ -129,5 +177,6 @@ struct dma_buf_export_sync_file {
#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32)
#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64)
#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
+#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
#endif
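Finally, a note on detection, offered as an assumption about typical userspace rather than anything this patch guarantees: on kernels without these ioctls (or with CONFIG_SYNC_FILE disabled) the dma-buf ioctl handler falls through to its default case and the call fails with ENOTTY, so a one-time probe lets clients fall back to implicit-sync-only behaviour.

#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <stdbool.h>

/* Probe with an invalid sync_file fd so the call can never attach anything:
 * a kernel with this patch rejects it with EINVAL, an older kernel (or one
 * without CONFIG_SYNC_FILE) with ENOTTY. */
static bool kernel_has_import_sync_file(int dmabuf_fd)
{
	struct dma_buf_import_sync_file arg = {
		.flags = DMA_BUF_SYNC_RW,
		.fd = -1,
	};

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg) == 0)
		return true;

	return errno != ENOTTY;
}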