
[4/6] dma-buf: DRAFT: Make SYNC mandatory when userspace mmaps

Message ID: 1440631758-30303-5-git-send-email-tiago.vignatti@intel.com
State: New, archived

Commit Message

Tiago Vignatti Aug. 26, 2015, 11:29 p.m. UTC
This is my (failed) attempt to make the SYNC_* ioctls mandatory: I've tried to
revoke write access to the mapped region until begin_cpu_access is called.

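To make the intent concrete, here is roughly the userspace sequence this would
enforce. The helper below is only an illustration, and the SYNC uapi names
(struct dma_buf_sync, DMA_BUF_IOCTL_SYNC, DMA_BUF_SYNC_*) are taken from the
proposal earlier in this series, so they may still change:

  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/dma-buf.h>

  /* Illustrative only: helper name is made up, uapi names are from the
   * earlier patches in this series and may differ in the final version. */
  static void cpu_write_to_dmabuf(int dmabuf_fd, size_t size)
  {
          struct dma_buf_sync sync = { 0 };
          char *map;

          map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                     dmabuf_fd, 0);
          if (map == MAP_FAILED)
                  return;

          /* SYNC_START gives CPU write access back (begin_cpu_access)... */
          sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
          ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

          map[0] = 0x42;  /* ...so CPU writes only happen inside the bracket... */

          /* ...and SYNC_END hands the buffer back to the device. */
          sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
          ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

          munmap(map, size);
  }
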
The tasklet scheduling order seems alright, but the whole logic is not working;
I guess it's something related to the fs trick I'm trying to do with the
put{,get}_write_access pair...

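For reference, these are the inode write-access helpers I'm abusing, as I read
them in include/linux/fs.h (i_writecount > 0 means that many writers hold write
access, < 0 means writes are being denied):

  static inline int get_write_access(struct inode *inode)
  {
          /* fails with -ETXTBSY while the count is negative (writes denied) */
          return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY;
  }

  static inline void put_write_access(struct inode *inode)
  {
          /* unconditionally drops one writer; can push the count negative */
          atomic_dec(&inode->i_writecount);
  }

deny_write_access()/allow_write_access() are the struct file based pair used
e.g. by exec() to forbid writes to a running binary.
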
Not sure if I should follow this direction though; I've already spent quite some
time on it. What do you think?

Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Jérôme Glisse <jglisse@redhat.com>

---
 drivers/dma-buf/dma-buf.c | 31 ++++++++++++++++++++++++++++++-
 include/linux/dma-buf.h   |  3 +++
 2 files changed, 33 insertions(+), 1 deletion(-)

Patch

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 9a298bd..06cb37b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -75,14 +75,34 @@  static int dma_buf_release(struct inode *inode, struct file *file)
 	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
 		reservation_object_fini(dmabuf->resv);
 
+	tasklet_kill(&dmabuf->tasklet);
+
 	module_put(dmabuf->owner);
 	kfree(dmabuf);
 	return 0;
 }
 
+static void dmabuf_mmap_tasklet(unsigned long data)
+{
+	struct dma_buf *dmabuf = (struct dma_buf *) data;
+	struct inode *inode = file_inode(dmabuf->file);
+
+	if (!inode)
+		return;
+
+	/* the CPU accessing another device may put the cache in an incoherent state.
+	 * Therefore if the mmap succeeds, we forbid any further write access to the
+	 * dma-buf until SYNC_START ioctl takes place, which gets back the write
+	 * access. */
+	put_write_access(inode);
+
+	inode_dio_wait(inode);
+}
+
 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 {
 	struct dma_buf *dmabuf;
+	int ret;
 
 	if (!is_dma_buf_file(file))
 		return -EINVAL;
@@ -94,7 +114,11 @@  static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 	    dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
 
-	return dmabuf->ops->mmap(dmabuf, vma);
+	ret = dmabuf->ops->mmap(dmabuf, vma);
+	if (!ret)
+		tasklet_schedule(&dmabuf->tasklet);
+
+	return ret;
 }
 
 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
@@ -389,6 +413,8 @@  struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
 
+	tasklet_init(&dmabuf->tasklet, dmabuf_mmap_tasklet, (unsigned long) dmabuf);
+
 	return dmabuf;
 }
 EXPORT_SYMBOL_GPL(dma_buf_export);
@@ -589,6 +615,7 @@  EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 			     enum dma_data_direction direction)
 {
+	struct inode *inode = file_inode(dmabuf->file);
 	int ret = 0;
 
 	if (WARN_ON(!dmabuf))
@@ -597,6 +624,8 @@  int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	if (dmabuf->ops->begin_cpu_access)
 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
+	get_write_access(inode);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 532108e..0359792 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -24,6 +24,7 @@ 
 #ifndef __DMA_BUF_H__
 #define __DMA_BUF_H__
 
+#include <linux/interrupt.h>
 #include <linux/file.h>
 #include <linux/err.h>
 #include <linux/scatterlist.h>
@@ -118,6 +119,7 @@  struct dma_buf_ops {
  * @list_node: node for dma_buf accounting and debugging.
  * @priv: exporter specific private data for this buffer object.
  * @resv: reservation object linked to this dma-buf
+ * @tasklet: tasklet for deferred mmap tasks.
  */
 struct dma_buf {
 	size_t size;
@@ -133,6 +135,7 @@  struct dma_buf {
 	struct list_head list_node;
 	void *priv;
 	struct reservation_object *resv;
+	struct tasklet_struct tasklet;
 
 	/* poll support */
 	wait_queue_head_t poll;