[v2,4/4] dma-buf: heaps: add chunk heap to dmabuf heaps

Message ID 20201201175144.3996569-5-minchan@kernel.org (mailing list archive)
State New, archived
Series Chunk Heap Support on DMA-HEAP

Commit Message

Minchan Kim Dec. 1, 2020, 5:51 p.m. UTC
From: Hyesoo Yu <hyesoo.yu@samsung.com>

This patch adds a chunk heap that allocates buffers arranged as a
list of fixed-size chunks taken from CMA.

The chunk heap doesn't use the heap-helpers, although doing so could
remove duplicated code, since the heap-helpers are in the process of
being deprecated.[1]

NOTE: This patch only adds the default CMA heap for allocating chunk
pages. We will add other CMA memory regions to the dmabuf heaps
interface in a later patch (which requires a dt binding)

[1] https://lore.kernel.org/patchwork/patch/1336002

Signed-off-by: Hyesoo Yu <hyesoo.yu@samsung.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 drivers/dma-buf/heaps/Kconfig      |  15 +
 drivers/dma-buf/heaps/Makefile     |   1 +
 drivers/dma-buf/heaps/chunk_heap.c | 429 +++++++++++++++++++++++++++++
 3 files changed, 445 insertions(+)
 create mode 100644 drivers/dma-buf/heaps/chunk_heap.c
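
For context on how such a heap gets consumed: userspace allocates from
it through the standard dma-heap ioctl. A minimal sketch (the device
node name below is an assumption; the heap node is named after the
backing CMA region):

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/dma-heap.h>

  /* Sketch: allocate one buffer from the chunk heap and return the
   * dma-buf fd. "/dev/dma_heap/reserved" is hypothetical; the node
   * takes the name of the backing CMA area.
   */
  int alloc_chunk_buffer(unsigned long long len)
  {
          struct dma_heap_allocation_data data = {
                  .len = len,
                  .fd_flags = O_RDWR | O_CLOEXEC,
          };
          int heap_fd, ret;

          heap_fd = open("/dev/dma_heap/reserved", O_RDONLY | O_CLOEXEC);
          if (heap_fd < 0)
                  return -1;

          ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
          close(heap_fd);

          return ret < 0 ? -1 : (int)data.fd;
  }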

Comments

John Stultz Dec. 1, 2020, 7:48 p.m. UTC | #1
On Tue, Dec 1, 2020 at 9:51 AM Minchan Kim <minchan@kernel.org> wrote:

Thanks for reworking and resending this!

...
> +static int __init chunk_heap_init(void)
> +{
> +       struct cma *default_cma = dev_get_cma_area(NULL);
> +       struct dma_heap_export_info exp_info;
> +       struct chunk_heap *chunk_heap;
> +
> +       if (!default_cma)
> +               return 0;
> +
> +       chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
> +       if (!chunk_heap)
> +               return -ENOMEM;
> +
> +       chunk_heap->order = CHUNK_HEAP_ORDER;
> +       chunk_heap->cma = default_cma;
> +
> +       exp_info.name = cma_get_name(default_cma);

So, this would create a chunk heap name with the default CMA name,
which would be indistinguishable from the heap name used for the plain
CMA heap.

Probably a good idea to prefix it with "chunk-" so the heap device
names are unique?

thanks
-john
Minchan Kim Dec. 1, 2020, 10:55 p.m. UTC | #2
On Tue, Dec 01, 2020 at 11:48:15AM -0800, John Stultz wrote:
> On Tue, Dec 1, 2020 at 9:51 AM Minchan Kim <minchan@kernel.org> wrote:
> 
> Thanks for reworking and resending this!
> 
> ...
> > +static int __init chunk_heap_init(void)
> > +{
> > +       struct cma *default_cma = dev_get_cma_area(NULL);
> > +       struct dma_heap_export_info exp_info;
> > +       struct chunk_heap *chunk_heap;
> > +
> > +       if (!default_cma)
> > +               return 0;
> > +
> > +       chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
> > +       if (!chunk_heap)
> > +               return -ENOMEM;
> > +
> > +       chunk_heap->order = CHUNK_HEAP_ORDER;
> > +       chunk_heap->cma = default_cma;
> > +
> > +       exp_info.name = cma_get_name(default_cma);
> 
> So, this would create a chunk heap name with the default CMA name,
> which would be indistinguishable from the heap name used for the plain
> CMA heap.
> 
> Probably a good idea to prefix it with "chunk-" so the heap device
> names are unique?

That would give users the impression that they are using a different CMA
area, but that's not true. IMHO, let's be honest for now.
When the DT binding for CMA lands, it could provide a unique name.
Thoughts?
John Stultz Dec. 1, 2020, 11:38 p.m. UTC | #3
On Tue, Dec 1, 2020 at 2:55 PM Minchan Kim <minchan@kernel.org> wrote:
> On Tue, Dec 01, 2020 at 11:48:15AM -0800, John Stultz wrote:
> > On Tue, Dec 1, 2020 at 9:51 AM Minchan Kim <minchan@kernel.org> wrote:
> >
> > Thanks for reworking and resending this!
> >
> > ...
> > > +static int __init chunk_heap_init(void)
> > > +{
> > > +       struct cma *default_cma = dev_get_cma_area(NULL);
> > > +       struct dma_heap_export_info exp_info;
> > > +       struct chunk_heap *chunk_heap;
> > > +
> > > +       if (!default_cma)
> > > +               return 0;
> > > +
> > > +       chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
> > > +       if (!chunk_heap)
> > > +               return -ENOMEM;
> > > +
> > > +       chunk_heap->order = CHUNK_HEAP_ORDER;
> > > +       chunk_heap->cma = default_cma;
> > > +
> > > +       exp_info.name = cma_get_name(default_cma);
> >
> > So, this would create a chunk heap name with the default CMA name,
> > which would be indistinguishable from the heap name used for the plain
> > CMA heap.
> >
> > Probably a good idea to prefix it with "chunk-" so the heap device
> > names are unique?
>
> That would give users the impression that they are using a different CMA
> area, but that's not true. IMHO, let's be honest for now.

I disagree.  The dmabuf heaps provide an abstraction for allocating a
type of memory, and while your heap is pulling from CMA, you aren't
"just" allocating CMA as the existing CMA heap would suffice for that.

Since you need a slightly different method to allocate high order
pages in bulk, we really should have a unique way to name the
allocator interface. That's why I'd suggest the "chunk-" prefix to the
heap name.

thanks
-john
Minchan Kim Dec. 2, 2020, 12:13 a.m. UTC | #4
On Tue, Dec 01, 2020 at 03:38:14PM -0800, John Stultz wrote:
> On Tue, Dec 1, 2020 at 2:55 PM Minchan Kim <minchan@kernel.org> wrote:
> > On Tue, Dec 01, 2020 at 11:48:15AM -0800, John Stultz wrote:
> > > On Tue, Dec 1, 2020 at 9:51 AM Minchan Kim <minchan@kernel.org> wrote:
> > >
> > > Thanks for reworking and resending this!
> > >
> > > ...
> > > > +static int __init chunk_heap_init(void)
> > > > +{
> > > > +       struct cma *default_cma = dev_get_cma_area(NULL);
> > > > +       struct dma_heap_export_info exp_info;
> > > > +       struct chunk_heap *chunk_heap;
> > > > +
> > > > +       if (!default_cma)
> > > > +               return 0;
> > > > +
> > > > +       chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
> > > > +       if (!chunk_heap)
> > > > +               return -ENOMEM;
> > > > +
> > > > +       chunk_heap->order = CHUNK_HEAP_ORDER;
> > > > +       chunk_heap->cma = default_cma;
> > > > +
> > > > +       exp_info.name = cma_get_name(default_cma);
> > >
> > > So, this would create a chunk heap name with the default CMA name,
> > > which would be indistinguishable from the heap name used for the plain
> > > CMA heap.
> > >
> > > Probably a good idea to prefix it with "chunk-" so the heap device
> > > names are unique?
> >
> > That would give users the impression that they are using a different CMA
> > area, but that's not true. IMHO, let's be honest for now.
> 
> I disagree.  The dmabuf heaps provide an abstraction for allocating a
> type of memory, and while your heap is pulling from CMA, you aren't
> "just" allocating CMA as the existing CMA heap would suffice for that.
> 
> Since you need a slightly different method to allocate high order
> pages in bulk, we really should have a unique way to name the
> allocator interface. That's why I'd suggest the "chunk-" prefix to the
> heap name.

Got it. How about this? 

diff --git a/drivers/dma-buf/heaps/chunk_heap.c b/drivers/dma-buf/heaps/chunk_heap.c
index 0277707a93a9..36e189d0b73d 100644
--- a/drivers/dma-buf/heaps/chunk_heap.c
+++ b/drivers/dma-buf/heaps/chunk_heap.c
@@ -410,7 +410,7 @@ static int __init chunk_heap_init(void)
        chunk_heap->order = CHUNK_HEAP_ORDER;
        chunk_heap->cma = default_cma;

-       exp_info.name = cma_get_name(default_cma);
+       exp_info.name = "cma-chunk-heap";
        exp_info.ops = &chunk_heap_ops;
        exp_info.priv = chunk_heap;
John Stultz Dec. 2, 2020, 12:33 a.m. UTC | #5
On Tue, Dec 1, 2020 at 4:13 PM Minchan Kim <minchan@kernel.org> wrote:
>
> On Tue, Dec 01, 2020 at 03:38:14PM -0800, John Stultz wrote:
> > On Tue, Dec 1, 2020 at 2:55 PM Minchan Kim <minchan@kernel.org> wrote:
> > > On Tue, Dec 01, 2020 at 11:48:15AM -0800, John Stultz wrote:
> > > > On Tue, Dec 1, 2020 at 9:51 AM Minchan Kim <minchan@kernel.org> wrote:
> > > >
> > > > Thanks for reworking and resending this!
> > > >
> > > > ...
> > > > > +static int __init chunk_heap_init(void)
> > > > > +{
> > > > > +       struct cma *default_cma = dev_get_cma_area(NULL);
> > > > > +       struct dma_heap_export_info exp_info;
> > > > > +       struct chunk_heap *chunk_heap;
> > > > > +
> > > > > +       if (!default_cma)
> > > > > +               return 0;
> > > > > +
> > > > > +       chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
> > > > > +       if (!chunk_heap)
> > > > > +               return -ENOMEM;
> > > > > +
> > > > > +       chunk_heap->order = CHUNK_HEAP_ORDER;
> > > > > +       chunk_heap->cma = default_cma;
> > > > > +
> > > > > +       exp_info.name = cma_get_name(default_cma);
> > > >
> > > > So, this would create a chunk heap name with the default CMA name,
> > > > which would be indistinguishable from the heap name used for the plain
> > > > CMA heap.
> > > >
> > > > Probably a good idea to prefix it with "chunk-" so the heap device
> > > > names are unique?
> > >
> > > That would give users the impression that they are using a different CMA
> > > area, but that's not true. IMHO, let's be honest for now.
> >
> > I disagree.  The dmabuf heaps provide an abstraction for allocating a
> > type of memory, and while your heap is pulling from CMA, you aren't
> > "just" allocating CMA as the existing CMA heap would suffice for that.
> >
> > Since you need a slightly different method to allocate high order
> > pages in bulk, we really should have a unique way to name the
> > allocator interface. That's why I'd suggest the "chunk-" prefix to the
> > heap name.
>
> Got it. How about this?
>
> diff --git a/drivers/dma-buf/heaps/chunk_heap.c b/drivers/dma-buf/heaps/chunk_heap.c
> index 0277707a93a9..36e189d0b73d 100644
> --- a/drivers/dma-buf/heaps/chunk_heap.c
> +++ b/drivers/dma-buf/heaps/chunk_heap.c
> @@ -410,7 +410,7 @@ static int __init chunk_heap_init(void)
>         chunk_heap->order = CHUNK_HEAP_ORDER;
>         chunk_heap->cma = default_cma;
>
> -       exp_info.name = cma_get_name(default_cma);
> +       exp_info.name = "cma-chunk-heap";

That's still a bit too general for the default cma (which can be named
differently). I think including the cma name is important; just adding
the chunk prefix might be best.

So something like
  sprintf(buf, "chunk-%s", cma_get_name(default_cma));
  exp_info.name = buf;
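
Or, spelled out with a real allocation, since the string needs to
outlive init if dma_heap_add() stores the name pointer rather than
copying it (just a sketch):

  /* Build a persistent "chunk-<cma name>" heap name. */
  exp_info.name = kasprintf(GFP_KERNEL, "chunk-%s",
                            cma_get_name(default_cma));
  if (!exp_info.name) {
          kfree(chunk_heap);
          return -ENOMEM;
  }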

thanks
-john
Minchan Kim Dec. 2, 2020, 12:57 a.m. UTC | #6
On Tue, Dec 01, 2020 at 04:33:14PM -0800, John Stultz wrote:
> On Tue, Dec 1, 2020 at 4:13 PM Minchan Kim <minchan@kernel.org> wrote:
> >
> > On Tue, Dec 01, 2020 at 03:38:14PM -0800, John Stultz wrote:
> > > On Tue, Dec 1, 2020 at 2:55 PM Minchan Kim <minchan@kernel.org> wrote:
> > > > On Tue, Dec 01, 2020 at 11:48:15AM -0800, John Stultz wrote:
> > > > > On Tue, Dec 1, 2020 at 9:51 AM Minchan Kim <minchan@kernel.org> wrote:
> > > > >
> > > > > Thanks for reworking and resending this!
> > > > >
> > > > > ...
> > > > > > +static int __init chunk_heap_init(void)
> > > > > > +{
> > > > > > +       struct cma *default_cma = dev_get_cma_area(NULL);
> > > > > > +       struct dma_heap_export_info exp_info;
> > > > > > +       struct chunk_heap *chunk_heap;
> > > > > > +
> > > > > > +       if (!default_cma)
> > > > > > +               return 0;
> > > > > > +
> > > > > > +       chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
> > > > > > +       if (!chunk_heap)
> > > > > > +               return -ENOMEM;
> > > > > > +
> > > > > > +       chunk_heap->order = CHUNK_HEAP_ORDER;
> > > > > > +       chunk_heap->cma = default_cma;
> > > > > > +
> > > > > > +       exp_info.name = cma_get_name(default_cma);
> > > > >
> > > > > So, this would create a chunk heap name with the default CMA name,
> > > > > which would be indistinguishable from the heap name used for the plain
> > > > > CMA heap.
> > > > >
> > > > > Probably a good idea to prefix it with "chunk-" so the heap device
> > > > > names are unique?
> > > >
> > > > That would give users the impression that they are using a different CMA
> > > > area, but that's not true. IMHO, let's be honest for now.
> > >
> > > I disagree.  The dmabuf heaps provide an abstraction for allocating a
> > > type of memory, and while your heap is pulling from CMA, you aren't
> > > "just" allocating CMA as the existing CMA heap would suffice for that.
> > >
> > > Since you need a slightly different method to allocate high order
> > > pages in bulk, we really should have a unique way to name the
> > > allocator interface. That's why I'd suggest the "chunk-" prefix to the
> > > heap name.
> >
> > Got it. How about this?
> >
> > diff --git a/drivers/dma-buf/heaps/chunk_heap.c b/drivers/dma-buf/heaps/chunk_heap.c
> > index 0277707a93a9..36e189d0b73d 100644
> > --- a/drivers/dma-buf/heaps/chunk_heap.c
> > +++ b/drivers/dma-buf/heaps/chunk_heap.c
> > @@ -410,7 +410,7 @@ static int __init chunk_heap_init(void)
> >         chunk_heap->order = CHUNK_HEAP_ORDER;
> >         chunk_heap->cma = default_cma;
> >
> > -       exp_info.name = cma_get_name(default_cma);
> > +       exp_info.name = "cma-chunk-heap";
> 
> That's still a bit too general for the default cma (which can be named
> differently). I think including the cma name is important; just adding
> the chunk prefix might be best.
> 
> So something like
>   sprintf(buf, "chunk-%s", cma_get_name(default_cma));
>   exp_info.name = buf;

No problem. Will do that in the respin.
Other than that, can you give an Acked-by or Reviewed-by to save an
iteration?
Christoph Hellwig Dec. 2, 2020, 1:54 p.m. UTC | #7
On Tue, Dec 01, 2020 at 09:51:44AM -0800, Minchan Kim wrote:
> From: Hyesoo Yu <hyesoo.yu@samsung.com>
> 
> This patch adds a chunk heap that allocates buffers arranged as a
> list of fixed-size chunks taken from CMA.
>
> The chunk heap doesn't use the heap-helpers, although doing so could
> remove duplicated code, since the heap-helpers are in the process of
> being deprecated.[1]
>
> NOTE: This patch only adds the default CMA heap for allocating chunk
> pages. We will add other CMA memory regions to the dmabuf heaps
> interface in a later patch (which requires a dt binding)

This new heap seems to largely duplicate the existing cma_heap.c
file.  Why can't you reuse the code and allow creating different
heaps with different chunk sizes or max numbers of segments?
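
Something along these lines, say (a rough sketch; the order field and
its use are hypothetical, not the current cma_heap.c code):

  /* Hypothetical: fold the chunk behaviour into the existing CMA
   * heap by giving each heap instance an allocation order. Order 0
   * keeps today's page-granular CMA heap; order > 0 builds the
   * buffer as a list of fixed-size chunks, e.g. one
   * cma_alloc(cma, 1 << order, order, false) call per chunk.
   */
  struct cma_heap {
          struct dma_heap *heap;
          struct cma *cma;
          unsigned int order;
  };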

> +config DMABUF_HEAPS_CHUNK_ORDER
> +	int "Chunk page order for dmabuf chunk heap"
> +	default 4
> +	depends on DMABUF_HEAPS_CHUNK
> +	help
> +	  Set page order of fixed chunk size to allocate from CMA.

Using a config option for this is just broken.  It needs to be runtime
or at the very least boot-time / DT controllable.
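
For instance a module parameter, which doubles as a boot-time knob on
the kernel command line (sketch; the parameter name is illustrative):

  /* Run/boot-time control of the chunk order, e.g.
   * chunk_heap.order=4 on the command line, replacing the
   * CONFIG_DMABUF_HEAPS_CHUNK_ORDER constant.
   */
  static unsigned int chunk_heap_order = 4;
  module_param_named(order, chunk_heap_order, uint, 0444);
  MODULE_PARM_DESC(order, "page order of a chunk allocated from CMA");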

> + * ION Memory Allocator chunk heap exporter

This comment seems wrong.
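
Presumably a leftover from the ION port; something like

  * DMA-BUF chunk heap exporter

would match what the file actually is.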

Patch

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06c4226..9153f83afed7 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,3 +12,18 @@  config DMABUF_HEAPS_CMA
 	  Choose this option to enable dma-buf CMA heap. This heap is backed
 	  by the Contiguous Memory Allocator (CMA). If your system has these
 	  regions, you should say Y here.
+
+config DMABUF_HEAPS_CHUNK
+	tristate "DMA-BUF CHUNK Heap"
+	depends on DMABUF_HEAPS && DMA_CMA
+	help
+	  Choose this option to enable the dma-buf CHUNK heap. This heap is
+	  backed by the Contiguous Memory Allocator (CMA) and allocates buffers
+	  arranged as a list of fixed-size chunks taken from CMA.
+
+config DMABUF_HEAPS_CHUNK_ORDER
+	int "Chunk page order for dmabuf chunk heap"
+	default 4
+	depends on DMABUF_HEAPS_CHUNK
+	help
+	  Set page order of fixed chunk size to allocate from CMA.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 974467791032..8faa6cfdc0c5 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,3 +1,4 @@ 
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CHUNK)	+= chunk_heap.o
diff --git a/drivers/dma-buf/heaps/chunk_heap.c b/drivers/dma-buf/heaps/chunk_heap.c
new file mode 100644
index 000000000000..0277707a93a9
--- /dev/null
+++ b/drivers/dma-buf/heaps/chunk_heap.c
@@ -0,0 +1,429 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ION Memory Allocator chunk heap exporter
+ *
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * Author: <hyesoo.yu@samsung.com> for Samsung Electronics.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/cma.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-map-ops.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of.h>
+
+struct chunk_heap {
+	struct dma_heap *heap;
+	unsigned int order;
+	struct cma *cma;
+};
+
+struct chunk_heap_buffer {
+	struct chunk_heap *heap;
+	struct list_head attachments;
+	struct mutex lock;
+	struct sg_table sg_table;
+	unsigned long len;
+	int vmap_cnt;
+	void *vaddr;
+};
+
+struct chunk_heap_attachment {
+	struct device *dev;
+	struct sg_table *table;
+	struct list_head list;
+	bool mapped;
+};
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+	struct sg_table *new_table;
+	int ret, i;
+	struct scatterlist *sg, *new_sg;
+
+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+	if (!new_table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(new_table);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	new_sg = new_table->sgl;
+	for_each_sgtable_sg(table, sg, i) {
+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+		new_sg = sg_next(new_sg);
+	}
+
+	return new_table;
+}
+
+static int chunk_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+	struct chunk_heap_buffer *buffer = dmabuf->priv;
+	struct chunk_heap_attachment *a;
+	struct sg_table *table;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	table = dup_sg_table(&buffer->sg_table);
+	if (IS_ERR(table)) {
+		kfree(a);
+		return -ENOMEM;
+	}
+
+	a->table = table;
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->list);
+	a->mapped = false;
+
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void chunk_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+	struct chunk_heap_buffer *buffer = dmabuf->priv;
+	struct chunk_heap_attachment *a = attachment->priv;
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+
+	sg_free_table(a->table);
+	kfree(a->table);
+	kfree(a);
+}
+
+static struct sg_table *chunk_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+					       enum dma_data_direction direction)
+{
+	struct chunk_heap_attachment *a = attachment->priv;
+	struct sg_table *table = a->table;
+	int ret;
+
+	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	if (ret)
+		return ERR_PTR(ret);
+
+	a->mapped = true;
+	return table;
+}
+
+static void chunk_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				     struct sg_table *table,
+				     enum dma_data_direction direction)
+{
+	struct chunk_heap_attachment *a = attachment->priv;
+
+	a->mapped = false;
+	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int chunk_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+						enum dma_data_direction direction)
+{
+	struct chunk_heap_buffer *buffer = dmabuf->priv;
+	struct chunk_heap_attachment *a;
+
+	mutex_lock(&buffer->lock);
+
+	if (buffer->vmap_cnt)
+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int chunk_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					      enum dma_data_direction direction)
+{
+	struct chunk_heap_buffer *buffer = dmabuf->priv;
+	struct chunk_heap_attachment *a;
+
+	mutex_lock(&buffer->lock);
+
+	if (buffer->vmap_cnt)
+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sgtable_for_device(a->dev, a->table, direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int chunk_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct chunk_heap_buffer *buffer = dmabuf->priv;
+	struct sg_table *table = &buffer->sg_table;
+	unsigned long addr = vma->vm_start;
+	struct sg_page_iter piter;
+	int ret;
+
+	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
+		struct page *page = sg_page_iter_page(&piter);
+
+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
+				      vma->vm_page_prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+		if (addr >= vma->vm_end)
+			return 0;
+	}
+	return 0;
+}
+
+static void *chunk_heap_do_vmap(struct chunk_heap_buffer *buffer)
+{
+	struct sg_table *table = &buffer->sg_table;
+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
+	struct page **tmp = pages;
+	struct sg_page_iter piter;
+	void *vaddr;
+
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+
+	for_each_sgtable_page(table, &piter, 0) {
+		WARN_ON(tmp - pages >= npages);
+		*tmp++ = sg_page_iter_page(&piter);
+	}
+
+	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+	vfree(pages);
+
+	if (!vaddr)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+
+static int chunk_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct chunk_heap_buffer *buffer = dmabuf->priv;
+	int ret = 0;
+	void *vaddr;
+
+	mutex_lock(&buffer->lock);
+	if (buffer->vmap_cnt) {
+		vaddr = buffer->vaddr;
+		goto done;
+	}
+
+	vaddr = chunk_heap_do_vmap(buffer);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto err;
+	}
+
+	buffer->vaddr = vaddr;
+done:
+	buffer->vmap_cnt++;
+	dma_buf_map_set_vaddr(map, vaddr);
+err:
+	mutex_unlock(&buffer->lock);
+
+	return ret;
+}
+
+static void chunk_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct chunk_heap_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	if (!--buffer->vmap_cnt) {
+		vunmap(buffer->vaddr);
+		buffer->vaddr = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static void chunk_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct chunk_heap_buffer *buffer = dmabuf->priv;
+	struct chunk_heap *chunk_heap = buffer->heap;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i;
+
+	table = &buffer->sg_table;
+	for_each_sgtable_sg(table, sg, i)
+		cma_release(chunk_heap->cma, sg_page(sg), 1 << chunk_heap->order);
+	sg_free_table(table);
+	kfree(buffer);
+}
+
+static const struct dma_buf_ops chunk_heap_buf_ops = {
+	.attach = chunk_heap_attach,
+	.detach = chunk_heap_detach,
+	.map_dma_buf = chunk_heap_map_dma_buf,
+	.unmap_dma_buf = chunk_heap_unmap_dma_buf,
+	.begin_cpu_access = chunk_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access = chunk_heap_dma_buf_end_cpu_access,
+	.mmap = chunk_heap_mmap,
+	.vmap = chunk_heap_vmap,
+	.vunmap = chunk_heap_vunmap,
+	.release = chunk_heap_dma_buf_release,
+};
+
+static int chunk_heap_allocate(struct dma_heap *heap, unsigned long len,
+			       unsigned long fd_flags, unsigned long heap_flags)
+{
+	struct chunk_heap *chunk_heap = dma_heap_get_drvdata(heap);
+	struct chunk_heap_buffer *buffer;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dmabuf;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	struct page **pages;
+	unsigned int chunk_size = PAGE_SIZE << chunk_heap->order;
+	unsigned int count, alloced = 0;
+	unsigned int num_retry = 5;
+	int ret = -ENOMEM;
+	pgoff_t pg;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return ret;
+
+	INIT_LIST_HEAD(&buffer->attachments);
+	mutex_init(&buffer->lock);
+	buffer->heap = chunk_heap;
+	buffer->len = ALIGN(len, chunk_size);
+	count = buffer->len / chunk_size;
+
+	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		goto err_pages;
+
+	while (num_retry--) {
+		unsigned long nr_pages;
+
+		ret = cma_alloc_bulk(chunk_heap->cma, chunk_heap->order,
+				     num_retry ? true : false,
+				     chunk_heap->order, count - alloced,
+				     pages + alloced, &nr_pages);
+		alloced += nr_pages;
+		if (alloced == count)
+			break;
+		if (ret != -EBUSY)
+			break;
+
+	}
+	if (ret < 0)
+		goto err_alloc;
+
+	table = &buffer->sg_table;
+	if (sg_alloc_table(table, count, GFP_KERNEL))
+		goto err_alloc;
+
+	sg = table->sgl;
+	for (pg = 0; pg < count; pg++) {
+		sg_set_page(sg, pages[pg], chunk_size, 0);
+		sg = sg_next(sg);
+	}
+
+	exp_info.ops = &chunk_heap_buf_ops;
+	exp_info.size = buffer->len;
+	exp_info.flags = fd_flags;
+	exp_info.priv = buffer;
+	dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto err_export;
+	}
+	kvfree(pages);
+
+	ret = dma_buf_fd(dmabuf, fd_flags);
+	if (ret < 0) {
+		dma_buf_put(dmabuf);
+		return ret;
+	}
+
+	return 0;
+err_export:
+	sg_free_table(table);
+err_alloc:
+	for (pg = 0; pg < alloced; pg++)
+		cma_release(chunk_heap->cma, pages[pg], 1 << chunk_heap->order);
+	kvfree(pages);
+err_pages:
+	kfree(buffer);
+
+	return ret;
+}
+
+static const struct dma_heap_ops chunk_heap_ops = {
+	.allocate = chunk_heap_allocate,
+};
+
+#ifdef CONFIG_DMABUF_HEAPS_CHUNK_ORDER
+#define CHUNK_HEAP_ORDER (CONFIG_DMABUF_HEAPS_CHUNK_ORDER)
+#else
+#define CHUNK_HEAP_ORDER (0)
+#endif
+
+static int __init chunk_heap_init(void)
+{
+	struct cma *default_cma = dev_get_cma_area(NULL);
+	struct dma_heap_export_info exp_info;
+	struct chunk_heap *chunk_heap;
+
+	if (!default_cma)
+		return 0;
+
+	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
+	if (!chunk_heap)
+		return -ENOMEM;
+
+	chunk_heap->order = CHUNK_HEAP_ORDER;
+	chunk_heap->cma = default_cma;
+
+	exp_info.name = cma_get_name(default_cma);
+	exp_info.ops = &chunk_heap_ops;
+	exp_info.priv = chunk_heap;
+
+	chunk_heap->heap = dma_heap_add(&exp_info);
+	if (IS_ERR(chunk_heap->heap)) {
+		int ret = PTR_ERR(chunk_heap->heap);
+
+		kfree(chunk_heap);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(chunk_heap_init);
+MODULE_DESCRIPTION("DMA-BUF Chunk Heap");
+MODULE_LICENSE("GPL v2");