@@ -3,7 +3,11 @@
* DMABUF System heap exporter
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (C) 2019 Linaro Ltd.
+ * Copyright (C) 2019, 2020 Linaro Ltd.
+ *
+ * Portions based on Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*/

#include <linux/dma-buf.h>
@@ -15,72 +19,321 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
-#include <asm/page.h>
+#include <linux/vmalloc.h>

-#include "heap-helpers.h"
+static struct dma_heap *sys_heap;

-struct dma_heap *sys_heap;
+struct system_heap_buffer {
+ struct dma_heap *heap;
+ struct list_head attachments;
+ struct mutex lock;
+ unsigned long len;
+ struct sg_table sg_table;
+ int vmap_cnt;
+ void *vaddr;
+};

-static void system_heap_free(struct heap_helper_buffer *buffer)
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
+};
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
{
- pgoff_t pg;
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
+
+ new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
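+ /* Copy the page links only; DMA addresses get filled in per-attachment at map time */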
+ new_sg = new_table->sgl;
+ for_each_sgtable_sg(table, sg, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+ new_sg = sg_next(new_sg);
+ }
+
+ return new_table;
+}
+
+static int system_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ struct sg_table *table;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
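+ /*
+  * Give each attachment its own copy of the table so one device's
+  * DMA mapping state never clobbers another's.
+  */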
+ table = dup_sg_table(&buffer->sg_table);
+ if (IS_ERR(table)) {
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ a->table = table;
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void system_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = a->table;
+ int ret;
+
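+ /* Map the attachment's private copy; this writes DMA addresses into the table */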
+ ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return table;
+}
+
+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
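+ /* Drop any stale kernel-vmap alias so the CPU sees what the device wrote */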
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
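+ /* Write back the kernel-vmap alias before the device touches the buffer */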
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sgtable_for_device(a->dev, a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct sg_table *table = &buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ struct sg_page_iter piter;
+ int ret;
+
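+ /* Insert every backing page into the VMA up front, starting at vm_pgoff */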
+ for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
+ struct page *page = sg_page_iter_page(&piter);
+
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += PAGE_SIZE;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
+{
+ struct sg_table *table = &buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+ struct sg_page_iter piter;
+ void *vaddr;
+
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
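+ /* Flatten the sg_table into a page array that vmap() can consume */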
+ for_each_sgtable_page(table, &piter, 0) {
+ WARN_ON(tmp - pages >= npages);
+ *tmp++ = sg_page_iter_page(&piter);
+ }
+
+ vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ vfree(pages);
+
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static void *system_heap_vmap(struct dma_buf *dmabuf)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ mutex_lock(&buffer->lock);
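+ /* vmap_cnt refcounts a single shared kernel mapping */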
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ vaddr = buffer->vaddr;
+ goto out;
+ }
+
+ vaddr = system_heap_do_vmap(buffer);
+ if (IS_ERR(vaddr))
+ goto out;
+
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+out:
+ mutex_unlock(&buffer->lock);
+
+ return vaddr;
+}
+
+static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
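+/* Runs when the last reference to the dmabuf is dropped: free the backing pages */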
+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i;
+
- for (pg = 0; pg < buffer->pagecount; pg++)
- __free_page(buffer->pages[pg]);
- kfree(buffer->pages);
+ table = &buffer->sg_table;
+ for_each_sgtable_sg(table, sg, i)
+ __free_page(sg_page(sg));
+ sg_free_table(table);
kfree(buffer);
}

+static const struct dma_buf_ops system_heap_buf_ops = {
+ .attach = system_heap_attach,
+ .detach = system_heap_detach,
+ .map_dma_buf = system_heap_map_dma_buf,
+ .unmap_dma_buf = system_heap_unmap_dma_buf,
+ .begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = system_heap_dma_buf_end_cpu_access,
+ .mmap = system_heap_mmap,
+ .vmap = system_heap_vmap,
+ .vunmap = system_heap_vunmap,
+ .release = system_heap_dma_buf_release,
+};
+
static int system_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
{
- struct heap_helper_buffer *helper_buffer;
+ struct system_heap_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dmabuf;
- int ret = -ENOMEM;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ pgoff_t pagecount;
pgoff_t pg;
+ int i, ret = -ENOMEM;
- helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
- if (!helper_buffer)
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
return -ENOMEM;

- init_heap_helper_buffer(helper_buffer, system_heap_free);
- helper_buffer->heap = heap;
- helper_buffer->size = len;
-
- helper_buffer->pagecount = len / PAGE_SIZE;
- helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
- sizeof(*helper_buffer->pages),
- GFP_KERNEL);
- if (!helper_buffer->pages) {
- ret = -ENOMEM;
- goto err0;
- }
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->lock);
+ buffer->heap = heap;
+ buffer->len = len;

- for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+ table = &buffer->sg_table;
+ pagecount = len / PAGE_SIZE;
+ if (sg_alloc_table(table, pagecount, GFP_KERNEL))
+ goto free_buffer;
+
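+ /* Back every sg entry with its own zeroed order-0 page */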
+ sg = table->sgl;
+ for (pg = 0; pg < pagecount; pg++) {
+ struct page *page;
/*
* Avoid trying to allocate memory if the process
- * has been killed by by SIGKILL
+ * has been killed by SIGKILL
*/
if (fatal_signal_pending(current))
- goto err1;
-
- helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!helper_buffer->pages[pg])
- goto err1;
+ goto free_pages;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto free_pages;
+ sg_set_page(sg, page, page_size(page), 0);
+ sg = sg_next(sg);
}

/* create the dmabuf */
- dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ exp_info.ops = &system_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
- goto err1;
+ goto free_pages;
}
- helper_buffer->dmabuf = dmabuf;
-
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
@@ -90,12 +343,12 @@ static int system_heap_allocate(struct dma_heap *heap,
return ret;
-err1:
- while (pg > 0)
- __free_page(helper_buffer->pages[--pg]);
- kfree(helper_buffer->pages);
-err0:
- kfree(helper_buffer);
+free_pages:
+ for_each_sgtable_sg(table, sg, i) {
+ struct page *p = sg_page(sg);
+
+ /* entries past the failure point were never assigned a page */
+ if (p)
+ __free_page(p);
+ }
+ sg_free_table(table);
+free_buffer:
+ kfree(buffer);
return ret;
}
@@ -107,7 +360,6 @@ static const struct dma_heap_ops system_heap_ops = {
static int system_heap_create(void)
{
struct dma_heap_export_info exp_info;
- int ret = 0;

exp_info.name = "system";
exp_info.ops = &system_heap_ops;
@@ -115,9 +367,9 @@ static int system_heap_create(void)
sys_heap = dma_heap_add(&exp_info);
if (IS_ERR(sys_heap))
- ret = PTR_ERR(sys_heap);
+ return PTR_ERR(sys_heap);

- return ret;
+ return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");
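
For reference, the allocate path above is reached from userspace through the
DMA-heap character device. Below is a minimal sketch of an allocation, not
part of this patch, assuming the kernel exposes /dev/dma_heap/system and the
<linux/dma-heap.h> UAPI header; the error handling is illustrative only:

/*
 * Minimal userspace sketch (not part of this patch): allocate one page
 * from the system heap via the DMA-heap UAPI. Assumes /dev/dma_heap/system
 * exists and <linux/dma-heap.h> provides DMA_HEAP_IOCTL_ALLOC.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data;
	int heap_fd;

	heap_fd = open("/dev/dma_heap/system", O_RDWR);
	if (heap_fd < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.len = 4096;			/* one page */
	data.fd_flags = O_RDWR | O_CLOEXEC;	/* flags for the new dmabuf fd */

	/* Ends up in system_heap_allocate() via the dma-heap core */
	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
		close(heap_fd);
		return 1;
	}

	printf("dmabuf fd: %u\n", data.fd);	/* fd from dma_buf_fd() above */
	close(data.fd);
	close(heap_fd);
	return 0;
}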