@@ -75,14 +75,34 @@ static int dma_buf_release(struct inode *inode, struct file *file)
if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
reservation_object_fini(dmabuf->resv);
+ tasklet_kill(&dmabuf->tasklet);
+
module_put(dmabuf->owner);
kfree(dmabuf);
return 0;
}
+/*
+ * dmabuf_mmap_tasklet - deferred work scheduled by dma_buf_mmap_internal()
+ * after a successful mmap.
+ *
+ * Drops the inode's write access so the CPU cannot write the buffer while a
+ * device may be accessing it; dma_buf_begin_cpu_access() takes it back.
+ *
+ * NOTE(review): tasklets run in softirq (atomic) context, but
+ * inode_dio_wait() can sleep -- this likely needs to be a workqueue item
+ * rather than a tasklet; confirm before merging.
+ */
+static void dmabuf_mmap_tasklet(unsigned long data)
+{
+ struct dma_buf *dmabuf = (struct dma_buf *) data;
+ struct inode *inode = file_inode(dmabuf->file);
+
+ /* NOTE(review): file_inode() of a live dma-buf file should never be
+ * NULL, so this check looks redundant -- confirm. */
+ if (!inode)
+ return;
+
+ /* The CPU accessing the buffer may put the cache in an incoherent state
+ * with respect to the device. Therefore, if the mmap succeeds, forbid any
+ * further write access to the dma-buf until the SYNC_START ioctl takes
+ * place, which gets the write access back. */
+ put_write_access(inode);
+
+ inode_dio_wait(inode);
+}
+
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
+ int ret;
if (!is_dma_buf_file(file))
return -EINVAL;
@@ -94,7 +114,11 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
- return dmabuf->ops->mmap(dmabuf, vma);
+ ret = dmabuf->ops->mmap(dmabuf, vma);
+ /* On success, defer dropping the inode's write access to
+ * dmabuf_mmap_tasklet() instead of doing it inline here. */
+ if (!ret)
+ tasklet_schedule(&dmabuf->tasklet);
+
+ return ret;
}
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
@@ -389,6 +413,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
+ tasklet_init(&dmabuf->tasklet, dmabuf_mmap_tasklet, (unsigned long) dmabuf);
+
return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export);
@@ -589,6 +615,7 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
+ /* NOTE(review): dmabuf is dereferenced here before the WARN_ON(!dmabuf)
+ * NULL check below -- move this initialization after the check. */
+ struct inode *inode = file_inode(dmabuf->file);
int ret = 0;
if (WARN_ON(!dmabuf))
@@ -597,6 +624,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
+ /* Take back the write access dropped by dmabuf_mmap_tasklet() after mmap.
+ * NOTE(review): the return value of get_write_access() is ignored, and
+ * this runs even when no mmap ever dropped the access -- confirm the
+ * i_writecount balance. */
+ get_write_access(inode);
+
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
@@ -24,6 +24,7 @@
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
+#include <linux/interrupt.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -118,6 +119,7 @@ struct dma_buf_ops {
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
+ * @tasklet: tasklet that defers dropping the inode's write access after a successful mmap.
*/
struct dma_buf {
size_t size;
@@ -133,6 +135,7 @@ struct dma_buf {
struct list_head list_node;
void *priv;
struct reservation_object *resv;
+ struct tasklet_struct tasklet;
/* poll support */
wait_queue_head_t poll;