Message ID | 1551819273-640-5-git-send-email-john.stultz@linaro.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | DMA-BUF Heaps (destaging ION) | expand |
Le mar. 5 mars 2019 à 21:54, John Stultz <john.stultz@linaro.org> a écrit : > > This adds a CMA heap, which allows userspace to allocate > a dma-buf of contiguous memory out of a CMA region. > > This code is an evolution of the Android ION implementation, so > thanks to its original author and maintainters: > Benjamin Gaignard, Laura Abbott, and others! > > Cc: Laura Abbott <labbott@redhat.com> > Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org> > Cc: Greg KH <gregkh@linuxfoundation.org> > Cc: Sumit Semwal <sumit.semwal@linaro.org> > Cc: Liam Mark <lmark@codeaurora.org> > Cc: Brian Starkey <Brian.Starkey@arm.com> > Cc: Andrew F. Davis <afd@ti.com> > Cc: Chenbo Feng <fengc@google.com> > Cc: Alistair Strachan <astrachan@google.com> > Cc: dri-devel@lists.freedesktop.org > Signed-off-by: John Stultz <john.stultz@linaro.org> > --- > v2: > * Switch allocate to return dmabuf fd > * Simplify init code > * Checkpatch fixups > --- > drivers/dma-buf/heaps/Kconfig | 8 ++ > drivers/dma-buf/heaps/Makefile | 1 + > drivers/dma-buf/heaps/cma_heap.c | 164 +++++++++++++++++++++++++++++++++++++++ > 3 files changed, 173 insertions(+) > create mode 100644 drivers/dma-buf/heaps/cma_heap.c > > diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig > index 2050527..a5eef06 100644 > --- a/drivers/dma-buf/heaps/Kconfig > +++ b/drivers/dma-buf/heaps/Kconfig > @@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM > help > Choose this option to enable the system dmabuf heap. The system heap > is backed by pages from the buddy allocator. If in doubt, say Y. > + > +config DMABUF_HEAPS_CMA > + bool "DMA-BUF CMA Heap" > + depends on DMABUF_HEAPS && DMA_CMA > + help > + Choose this option to enable dma-buf CMA heap. This heap is backed > + by the Contiguous Memory Allocator (CMA). If your system has these > + regions, you should say Y here. 
> diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile > index d1808ec..6e54cde 100644 > --- a/drivers/dma-buf/heaps/Makefile > +++ b/drivers/dma-buf/heaps/Makefile > @@ -1,3 +1,4 @@ > # SPDX-License-Identifier: GPL-2.0 > obj-y += heap-helpers.o > obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o > +obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o > diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c > new file mode 100644 > index 0000000..33c18ec > --- /dev/null > +++ b/drivers/dma-buf/heaps/cma_heap.c > @@ -0,0 +1,164 @@ > +// SPDX-License-Identifier: GPL-2.0 > +/* > + * DMABUF CMA heap exporter > + * > + * Copyright (C) 2012, 2019 Linaro Ltd. > + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. > + */ > + > +#include <linux/device.h> > +#include <linux/dma-buf.h> > +#include <linux/dma-heap.h> > +#include <linux/slab.h> > +#include <linux/errno.h> > +#include <linux/err.h> > +#include <linux/cma.h> > +#include <linux/scatterlist.h> > +#include <linux/highmem.h> > + > +#include "heap-helpers.h" > + > +struct cma_heap { > + struct dma_heap heap; > + struct cma *cma; > +}; > + > + > +#define to_cma_heap(x) container_of(x, struct cma_heap, heap) Even if I had write this macro years ago, now I would prefer to have a static inline function to be able to check the types. 
with that: Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org> > + > + > +static void cma_heap_free(struct heap_helper_buffer *buffer) > +{ > + struct cma_heap *cma_heap = to_cma_heap(buffer->heap_buffer.heap); > + struct page *pages = buffer->priv_virt; > + unsigned long nr_pages; > + > + nr_pages = PAGE_ALIGN(buffer->heap_buffer.size) >> PAGE_SHIFT; > + > + /* release memory */ > + cma_release(cma_heap->cma, pages, nr_pages); > + /* release sg table */ > + sg_free_table(buffer->sg_table); > + kfree(buffer->sg_table); > + kfree(buffer); > +} > + > +/* dmabuf heap CMA operations functions */ > +static int cma_heap_allocate(struct dma_heap *heap, > + unsigned long len, > + unsigned long flags) > +{ > + struct cma_heap *cma_heap = to_cma_heap(heap); > + struct heap_helper_buffer *helper_buffer; > + struct sg_table *table; > + struct page *pages; > + size_t size = PAGE_ALIGN(len); > + unsigned long nr_pages = size >> PAGE_SHIFT; > + unsigned long align = get_order(size); > + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); > + struct dma_buf *dmabuf; > + int ret = -ENOMEM; > + > + if (align > CONFIG_CMA_ALIGNMENT) > + align = CONFIG_CMA_ALIGNMENT; > + > + helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); > + if (!helper_buffer) > + return -ENOMEM; > + > + INIT_HEAP_HELPER_BUFFER(helper_buffer, cma_heap_free); > + helper_buffer->heap_buffer.flags = flags; > + helper_buffer->heap_buffer.heap = heap; > + helper_buffer->heap_buffer.size = len; > + > + > + pages = cma_alloc(cma_heap->cma, nr_pages, align, false); > + if (!pages) > + goto free_buf; > + > + if (PageHighMem(pages)) { > + unsigned long nr_clear_pages = nr_pages; > + struct page *page = pages; > + > + while (nr_clear_pages > 0) { > + void *vaddr = kmap_atomic(page); > + > + memset(vaddr, 0, PAGE_SIZE); > + kunmap_atomic(vaddr); > + page++; > + nr_clear_pages--; > + } > + } else { > + memset(page_address(pages), 0, size); > + } > + > + table = kmalloc(sizeof(*table), GFP_KERNEL); > + if (!table) > + 
goto free_cma; > + > + ret = sg_alloc_table(table, 1, GFP_KERNEL); > + if (ret) > + goto free_table; > + > + sg_set_page(table->sgl, pages, size, 0); > + > + /* create the dmabuf */ > + exp_info.ops = &heap_helper_ops; > + exp_info.size = len; > + exp_info.flags = O_RDWR; > + exp_info.priv = &helper_buffer->heap_buffer; > + dmabuf = dma_buf_export(&exp_info); > + if (IS_ERR(dmabuf)) { > + ret = PTR_ERR(dmabuf); > + goto free_table; > + } > + > + helper_buffer->heap_buffer.dmabuf = dmabuf; > + helper_buffer->priv_virt = pages; > + helper_buffer->sg_table = table; > + > + ret = dma_buf_fd(dmabuf, O_CLOEXEC); > + if (ret < 0) { > + dma_buf_put(dmabuf); > + /* just return, as put will call release and that will free */ > + return ret; > + } > + > + return ret; > +free_table: > + kfree(table); > +free_cma: > + cma_release(cma_heap->cma, pages, nr_pages); > +free_buf: > + kfree(helper_buffer); > + return ret; > +} > + > +static struct dma_heap_ops cma_heap_ops = { > + .allocate = cma_heap_allocate, > +}; > + > +static int __add_cma_heaps(struct cma *cma, void *data) > +{ > + struct cma_heap *cma_heap; > + > + cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL); > + > + if (!cma_heap) > + return -ENOMEM; > + > + cma_heap->heap.name = cma_get_name(cma); > + cma_heap->heap.ops = &cma_heap_ops; > + cma_heap->cma = cma; > + > + dma_heap_add(&cma_heap->heap); > + > + return 0; > +} > + > +static int add_cma_heaps(void) > +{ > + cma_for_each_area(__add_cma_heaps, NULL); > + return 0; > +} > +device_initcall(add_cma_heaps); > -- > 2.7.4 >
On Fri, Mar 15, 2019 at 2:06 AM Christoph Hellwig <hch@infradead.org> wrote: > > On Tue, Mar 05, 2019 at 12:54:32PM -0800, John Stultz wrote: > > This adds a CMA heap, which allows userspace to allocate > > a dma-buf of contiguous memory out of a CMA region. > > With my previous suggestion of DMA API usage you'd get CMA support for > free in the system one instead of all this duplicate code.. Hey Christoph! Thanks for the review here! I'm still digesting your comments, so apologies if I misunderstand. On the point here, unless you're referring to some earlier suggestion on a previous discussion (and not the system heap feedback), part of the reason there are separate heaps is to allow Android to be able to optimize where the allocations are coming from to best match the use case. So they only want to allocate CMA backed dmabufs when the use case has devices that require it, or they may even want to have a separate CMA region reserved for a specific use case (like camera buffers). Similarly for any future heap for allocating secure dma-bufs. So while in the implementation we can consolidate the code more, we'd still probably want to have separate heaps. Does that make sense? Am I misinterpreting your feedback? thanks -john
On Tue, Mar 05, 2019 at 12:54:32PM -0800, John Stultz wrote: > This adds a CMA heap, which allows userspace to allocate > a dma-buf of contiguous memory out of a CMA region. > > This code is an evolution of the Android ION implementation, so > thanks to its original author and maintainters: > Benjamin Gaignard, Laura Abbott, and others! > > Cc: Laura Abbott <labbott@redhat.com> > Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org> > Cc: Greg KH <gregkh@linuxfoundation.org> > Cc: Sumit Semwal <sumit.semwal@linaro.org> > Cc: Liam Mark <lmark@codeaurora.org> > Cc: Brian Starkey <Brian.Starkey@arm.com> > Cc: Andrew F. Davis <afd@ti.com> > Cc: Chenbo Feng <fengc@google.com> > Cc: Alistair Strachan <astrachan@google.com> > Cc: dri-devel@lists.freedesktop.org > Signed-off-by: John Stultz <john.stultz@linaro.org> > --- > v2: > * Switch allocate to return dmabuf fd > * Simplify init code > * Checkpatch fixups > --- > drivers/dma-buf/heaps/Kconfig | 8 ++ > drivers/dma-buf/heaps/Makefile | 1 + > drivers/dma-buf/heaps/cma_heap.c | 164 +++++++++++++++++++++++++++++++++++++++ > 3 files changed, 173 insertions(+) > create mode 100644 drivers/dma-buf/heaps/cma_heap.c > > diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig > index 2050527..a5eef06 100644 > --- a/drivers/dma-buf/heaps/Kconfig > +++ b/drivers/dma-buf/heaps/Kconfig > @@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM > help > Choose this option to enable the system dmabuf heap. The system heap > is backed by pages from the buddy allocator. If in doubt, say Y. > + > +config DMABUF_HEAPS_CMA > + bool "DMA-BUF CMA Heap" > + depends on DMABUF_HEAPS && DMA_CMA > + help > + Choose this option to enable dma-buf CMA heap. This heap is backed > + by the Contiguous Memory Allocator (CMA). If your system has these > + regions, you should say Y here. 
> diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile > index d1808ec..6e54cde 100644 > --- a/drivers/dma-buf/heaps/Makefile > +++ b/drivers/dma-buf/heaps/Makefile > @@ -1,3 +1,4 @@ > # SPDX-License-Identifier: GPL-2.0 > obj-y += heap-helpers.o > obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o > +obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o > diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c > new file mode 100644 > index 0000000..33c18ec > --- /dev/null > +++ b/drivers/dma-buf/heaps/cma_heap.c > @@ -0,0 +1,164 @@ > +// SPDX-License-Identifier: GPL-2.0 > +/* > + * DMABUF CMA heap exporter > + * > + * Copyright (C) 2012, 2019 Linaro Ltd. > + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. > + */ > + > +#include <linux/device.h> > +#include <linux/dma-buf.h> > +#include <linux/dma-heap.h> > +#include <linux/slab.h> > +#include <linux/errno.h> > +#include <linux/err.h> > +#include <linux/cma.h> > +#include <linux/scatterlist.h> > +#include <linux/highmem.h> > + > +#include "heap-helpers.h" > + > +struct cma_heap { > + struct dma_heap heap; > + struct cma *cma; > +}; > + > + extra line > +#define to_cma_heap(x) container_of(x, struct cma_heap, heap) > + > + extra line > +static void cma_heap_free(struct heap_helper_buffer *buffer) > +{ > + struct cma_heap *cma_heap = to_cma_heap(buffer->heap_buffer.heap); > + struct page *pages = buffer->priv_virt; > + unsigned long nr_pages; > + > + nr_pages = PAGE_ALIGN(buffer->heap_buffer.size) >> PAGE_SHIFT; As you align at alloc time, I don't think the PAGE_ALIGN is really necessary here. 
> + > + /* release memory */ > + cma_release(cma_heap->cma, pages, nr_pages); > + /* release sg table */ > + sg_free_table(buffer->sg_table); > + kfree(buffer->sg_table); > + kfree(buffer); > +} > + > +/* dmabuf heap CMA operations functions */ > +static int cma_heap_allocate(struct dma_heap *heap, > + unsigned long len, > + unsigned long flags) > +{ > + struct cma_heap *cma_heap = to_cma_heap(heap); > + struct heap_helper_buffer *helper_buffer; > + struct sg_table *table; > + struct page *pages; > + size_t size = PAGE_ALIGN(len); > + unsigned long nr_pages = size >> PAGE_SHIFT; > + unsigned long align = get_order(size); > + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); > + struct dma_buf *dmabuf; > + int ret = -ENOMEM; > + > + if (align > CONFIG_CMA_ALIGNMENT) > + align = CONFIG_CMA_ALIGNMENT; > + > + helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); > + if (!helper_buffer) > + return -ENOMEM; > + > + INIT_HEAP_HELPER_BUFFER(helper_buffer, cma_heap_free); > + helper_buffer->heap_buffer.flags = flags; > + helper_buffer->heap_buffer.heap = heap; > + helper_buffer->heap_buffer.size = len; > + > + extra line > + pages = cma_alloc(cma_heap->cma, nr_pages, align, false); > + if (!pages) > + goto free_buf; > + > + if (PageHighMem(pages)) { > + unsigned long nr_clear_pages = nr_pages; > + struct page *page = pages; > + > + while (nr_clear_pages > 0) { > + void *vaddr = kmap_atomic(page); > + > + memset(vaddr, 0, PAGE_SIZE); > + kunmap_atomic(vaddr); > + page++; > + nr_clear_pages--; > + } > + } else { > + memset(page_address(pages), 0, size); > + } > + > + table = kmalloc(sizeof(*table), GFP_KERNEL); > + if (!table) > + goto free_cma; > + > + ret = sg_alloc_table(table, 1, GFP_KERNEL); > + if (ret) > + goto free_table; > + > + sg_set_page(table->sgl, pages, size, 0); > + > + /* create the dmabuf */ > + exp_info.ops = &heap_helper_ops; > + exp_info.size = len; > + exp_info.flags = O_RDWR; > + exp_info.priv = &helper_buffer->heap_buffer; > + dmabuf = 
dma_buf_export(&exp_info); > + if (IS_ERR(dmabuf)) { > + ret = PTR_ERR(dmabuf); > + goto free_table; > + } > + > + helper_buffer->heap_buffer.dmabuf = dmabuf; > + helper_buffer->priv_virt = pages; > + helper_buffer->sg_table = table; > + > + ret = dma_buf_fd(dmabuf, O_CLOEXEC); > + if (ret < 0) { > + dma_buf_put(dmabuf); > + /* just return, as put will call release and that will free */ > + return ret; > + } > + > + return ret; > +free_table: > + kfree(table); > +free_cma: > + cma_release(cma_heap->cma, pages, nr_pages); > +free_buf: > + kfree(helper_buffer); > + return ret; > +} > + > +static struct dma_heap_ops cma_heap_ops = { > + .allocate = cma_heap_allocate, > +}; > + > +static int __add_cma_heaps(struct cma *cma, void *data) nit: __add_cma_heap (not plural) seems more accurate. Whatever you decide for the above, you can add my r-b. Thanks, -Brian > +{ > + struct cma_heap *cma_heap; > + > + cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL); > + > + if (!cma_heap) > + return -ENOMEM; > + > + cma_heap->heap.name = cma_get_name(cma); > + cma_heap->heap.ops = &cma_heap_ops; > + cma_heap->cma = cma; > + > + dma_heap_add(&cma_heap->heap); > + > + return 0; > +} > + > +static int add_cma_heaps(void) > +{ > + cma_for_each_area(__add_cma_heaps, NULL); > + return 0; > +} > +device_initcall(add_cma_heaps); > -- > 2.7.4 >
On Wed, Mar 6, 2019 at 8:05 AM Benjamin Gaignard <benjamin.gaignard@linaro.org> wrote: > Le mar. 5 mars 2019 à 21:54, John Stultz <john.stultz@linaro.org> a écrit : > > +#define to_cma_heap(x) container_of(x, struct cma_heap, heap) > > Even if I had written this macro years ago, now I would prefer to have a > static inline function > to be able to check the types. > > with that: > Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org> Thanks for the suggestion! I've reworked that and the other container_of macro I had in the patch series to be inline functions. thanks again! -john
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index 2050527..a5eef06 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM help Choose this option to enable the system dmabuf heap. The system heap is backed by pages from the buddy allocator. If in doubt, say Y. + +config DMABUF_HEAPS_CMA + bool "DMA-BUF CMA Heap" + depends on DMABUF_HEAPS && DMA_CMA + help + Choose this option to enable dma-buf CMA heap. This heap is backed + by the Contiguous Memory Allocator (CMA). If your system has these + regions, you should say Y here. diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index d1808ec..6e54cde 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += heap-helpers.o obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o +obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c new file mode 100644 index 0000000..33c18ec --- /dev/null +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMABUF CMA heap exporter + * + * Copyright (C) 2012, 2019 Linaro Ltd. + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. 
+ */ + +#include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/dma-heap.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/cma.h> +#include <linux/scatterlist.h> +#include <linux/highmem.h> + +#include "heap-helpers.h" + +struct cma_heap { + struct dma_heap heap; + struct cma *cma; +}; + + +#define to_cma_heap(x) container_of(x, struct cma_heap, heap) + + +static void cma_heap_free(struct heap_helper_buffer *buffer) +{ + struct cma_heap *cma_heap = to_cma_heap(buffer->heap_buffer.heap); + struct page *pages = buffer->priv_virt; + unsigned long nr_pages; + + nr_pages = PAGE_ALIGN(buffer->heap_buffer.size) >> PAGE_SHIFT; + + /* release memory */ + cma_release(cma_heap->cma, pages, nr_pages); + /* release sg table */ + sg_free_table(buffer->sg_table); + kfree(buffer->sg_table); + kfree(buffer); +} + +/* dmabuf heap CMA operations functions */ +static int cma_heap_allocate(struct dma_heap *heap, + unsigned long len, + unsigned long flags) +{ + struct cma_heap *cma_heap = to_cma_heap(heap); + struct heap_helper_buffer *helper_buffer; + struct sg_table *table; + struct page *pages; + size_t size = PAGE_ALIGN(len); + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned long align = get_order(size); + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct dma_buf *dmabuf; + int ret = -ENOMEM; + + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; + + helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); + if (!helper_buffer) + return -ENOMEM; + + INIT_HEAP_HELPER_BUFFER(helper_buffer, cma_heap_free); + helper_buffer->heap_buffer.flags = flags; + helper_buffer->heap_buffer.heap = heap; + helper_buffer->heap_buffer.size = len; + + + pages = cma_alloc(cma_heap->cma, nr_pages, align, false); + if (!pages) + goto free_buf; + + if (PageHighMem(pages)) { + unsigned long nr_clear_pages = nr_pages; + struct page *page = pages; + + while (nr_clear_pages > 0) { + void *vaddr = kmap_atomic(page); + + 
memset(vaddr, 0, PAGE_SIZE); + kunmap_atomic(vaddr); + page++; + nr_clear_pages--; + } + } else { + memset(page_address(pages), 0, size); + } + + table = kmalloc(sizeof(*table), GFP_KERNEL); + if (!table) + goto free_cma; + + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto free_table; + + sg_set_page(table->sgl, pages, size, 0); + + /* create the dmabuf */ + exp_info.ops = &heap_helper_ops; + exp_info.size = len; + exp_info.flags = O_RDWR; + exp_info.priv = &helper_buffer->heap_buffer; + dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(dmabuf)) { + ret = PTR_ERR(dmabuf); + goto free_table; + } + + helper_buffer->heap_buffer.dmabuf = dmabuf; + helper_buffer->priv_virt = pages; + helper_buffer->sg_table = table; + + ret = dma_buf_fd(dmabuf, O_CLOEXEC); + if (ret < 0) { + dma_buf_put(dmabuf); + /* just return, as put will call release and that will free */ + return ret; + } + + return ret; +free_table: + kfree(table); +free_cma: + cma_release(cma_heap->cma, pages, nr_pages); +free_buf: + kfree(helper_buffer); + return ret; +} + +static struct dma_heap_ops cma_heap_ops = { + .allocate = cma_heap_allocate, +}; + +static int __add_cma_heaps(struct cma *cma, void *data) +{ + struct cma_heap *cma_heap; + + cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL); + + if (!cma_heap) + return -ENOMEM; + + cma_heap->heap.name = cma_get_name(cma); + cma_heap->heap.ops = &cma_heap_ops; + cma_heap->cma = cma; + + dma_heap_add(&cma_heap->heap); + + return 0; +} + +static int add_cma_heaps(void) +{ + cma_for_each_area(__add_cma_heaps, NULL); + return 0; +} +device_initcall(add_cma_heaps);
This adds a CMA heap, which allows userspace to allocate a dma-buf of contiguous memory out of a CMA region. This code is an evolution of the Android ION implementation, so thanks to its original author and maintainers: Benjamin Gaignard, Laura Abbott, and others! Cc: Laura Abbott <labbott@redhat.com> Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org> Cc: Greg KH <gregkh@linuxfoundation.org> Cc: Sumit Semwal <sumit.semwal@linaro.org> Cc: Liam Mark <lmark@codeaurora.org> Cc: Brian Starkey <Brian.Starkey@arm.com> Cc: Andrew F. Davis <afd@ti.com> Cc: Chenbo Feng <fengc@google.com> Cc: Alistair Strachan <astrachan@google.com> Cc: dri-devel@lists.freedesktop.org Signed-off-by: John Stultz <john.stultz@linaro.org> --- v2: * Switch allocate to return dmabuf fd * Simplify init code * Checkpatch fixups --- drivers/dma-buf/heaps/Kconfig | 8 ++ drivers/dma-buf/heaps/Makefile | 1 + drivers/dma-buf/heaps/cma_heap.c | 164 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 173 insertions(+) create mode 100644 drivers/dma-buf/heaps/cma_heap.c