| Message ID | 20190906184712.91980-5-john.stultz@linaro.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | DMA-BUF Heaps (destaging ION) |
Hi John,

I spotted one thing below which might be harmless, but best to check.

On Fri, Sep 06, 2019 at 06:47:11PM +0000, John Stultz wrote:
> This adds a CMA heap, which allows userspace to allocate
> a dma-buf of contiguous memory out of a CMA region.
>
> This code is an evolution of the Android ION implementation, so
> thanks to its original author and maintainers:
> Benjamin Gaignard, Laura Abbott, and others!
>
> Cc: Laura Abbott <labbott@redhat.com>
> Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
> Cc: Sumit Semwal <sumit.semwal@linaro.org>
> Cc: Liam Mark <lmark@codeaurora.org>
> Cc: Pratik Patel <pratikp@codeaurora.org>
> Cc: Brian Starkey <Brian.Starkey@arm.com>
> Cc: Vincent Donnefort <Vincent.Donnefort@arm.com>
> Cc: Sudipto Paul <Sudipto.Paul@arm.com>
> Cc: Andrew F. Davis <afd@ti.com>
> Cc: Christoph Hellwig <hch@infradead.org>
> Cc: Chenbo Feng <fengc@google.com>
> Cc: Alistair Strachan <astrachan@google.com>
> Cc: Hridya Valsaraju <hridya@google.com>
> Cc: dri-devel@lists.freedesktop.org
> Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
> Signed-off-by: John Stultz <john.stultz@linaro.org>
> ---
> v2:
> * Switch allocate to return dmabuf fd
> * Simplify init code
> * Checkpatch fixups
> v3:
> * Switch to inline function for to_cma_heap()
> * Minor cleanups suggested by Brian
> * Fold in new registration style from Andrew
> * Folded in changes from Andrew to use simplified page list
>   from the heap helpers
> v4:
> * Use the fd_flags when creating dmabuf fd (Suggested by
>   Benjamin)
> * Use precalculated pagecount (Suggested by Andrew)
> v6:
> * Changed variable names to improve clarity, as suggested
>   by Brian
> v7:
> * Use newly lower-cased init_heap_helper_buffer helper
> * Use new dmabuf export helper
> v8:
> * Make struct dma_heap_ops const (Suggested by Christoph)
> * Condense dma_heap_buffer and heap_helper_buffer (suggested by
>   Christoph)
> * Checkpatch whitespace fixups
> ---

...
> +
> +/* dmabuf heap CMA operations functions */
> +static int cma_heap_allocate(struct dma_heap *heap,
> +			     unsigned long len,
> +			     unsigned long fd_flags,
> +			     unsigned long heap_flags)
> +{
> +	struct cma_heap *cma_heap = dma_heap_get_data(heap);
> +	struct heap_helper_buffer *helper_buffer;
> +	struct page *cma_pages;
> +	size_t size = PAGE_ALIGN(len);
> +	unsigned long nr_pages = size >> PAGE_SHIFT;
> +	unsigned long align = get_order(size);
> +	struct dma_buf *dmabuf;
> +	int ret = -ENOMEM;
> +	pgoff_t pg;
> +
> +	if (align > CONFIG_CMA_ALIGNMENT)
> +		align = CONFIG_CMA_ALIGNMENT;
> +
> +	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
> +	if (!helper_buffer)
> +		return -ENOMEM;
> +
> +	init_heap_helper_buffer(helper_buffer, cma_heap_free);
> +	helper_buffer->flags = heap_flags;
> +	helper_buffer->heap = heap;
> +	helper_buffer->size = len;
> +
> +	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
> +	if (!cma_pages)
> +		goto free_buf;
> +
> +	if (PageHighMem(cma_pages)) {
> +		unsigned long nr_clear_pages = nr_pages;
> +		struct page *page = cma_pages;
> +
> +		while (nr_clear_pages > 0) {
> +			void *vaddr = kmap_atomic(page);
> +
> +			memset(vaddr, 0, PAGE_SIZE);
> +			kunmap_atomic(vaddr);
> +			page++;
> +			nr_clear_pages--;
> +		}
> +	} else {
> +		memset(page_address(cma_pages), 0, size);
> +	}
> +
> +	helper_buffer->pagecount = nr_pages;
> +	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
> +					     sizeof(*helper_buffer->pages),
> +					     GFP_KERNEL);
> +	if (!helper_buffer->pages) {
> +		ret = -ENOMEM;
> +		goto free_cma;
> +	}
> +
> +	for (pg = 0; pg < helper_buffer->pagecount; pg++) {
> +		helper_buffer->pages[pg] = &cma_pages[pg];
> +		if (!helper_buffer->pages[pg])

Is this ever really possible? If cma_pages is non-NULL (which you check
earlier), then it could only be NULL if the pointer arithmetic overflows,
right?

If it's just redundant, then you could remove it (and in that case add
my r-b). But maybe you meant to check something else?

Cheers,
-Brian
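[To make Brian's point concrete, here is a tiny standalone sketch (not part of the patch) showing why the NULL check is dead code: `&cma_pages[pg]` is by definition `cma_pages + pg`, which cannot be NULL once `cma_pages` has been verified non-NULL, short of pointer-arithmetic overflow, which is undefined behaviour anyway. The names below are made up for illustration.]

```c
#include <stdio.h>

struct page { char pad; };	/* stand-in for the kernel's struct page */

int main(void)
{
	struct page pages[4];		/* stands in for the CMA allocation */
	struct page *cma_pages = pages;
	unsigned long pg;

	/*
	 * &cma_pages[pg] is just cma_pages + pg: for a non-NULL base
	 * pointer the result is never NULL, so a NULL check on each
	 * element address can never fire.
	 */
	for (pg = 0; pg < 4; pg++)
		printf("page %lu at %p\n", pg, (void *)&cma_pages[pg]);
	return 0;
}
```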
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index 205052744169..a5eef06c4226 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM
 	help
 	  Choose this option to enable the system dmabuf heap. The system heap
 	  is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config DMABUF_HEAPS_CMA
+	bool "DMA-BUF CMA Heap"
+	depends on DMABUF_HEAPS && DMA_CMA
+	help
+	  Choose this option to enable dma-buf CMA heap. This heap is backed
+	  by the Contiguous Memory Allocator (CMA). If your system has these
+	  regions, you should say Y here.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index d1808eca2581..6e54cdec3da0 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y += heap-helpers.o
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
new file mode 100644
index 000000000000..b8f67b7c6a5c
--- /dev/null
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF CMA heap exporter
+ *
+ * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/cma.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+
+#include "heap-helpers.h"
+
+struct cma_heap {
+	struct dma_heap *heap;
+	struct cma *cma;
+};
+
+static void cma_heap_free(struct heap_helper_buffer *buffer)
+{
+	struct cma_heap *cma_heap = dma_heap_get_data(buffer->heap);
+	unsigned long nr_pages = buffer->pagecount;
+	struct page *cma_pages = buffer->priv_virt;
+
+	/* free page list */
+	kfree(buffer->pages);
+	/* release memory */
+	cma_release(cma_heap->cma, cma_pages, nr_pages);
+	kfree(buffer);
+}
+
+/* dmabuf heap CMA operations functions */
+static int cma_heap_allocate(struct dma_heap *heap,
+			     unsigned long len,
+			     unsigned long fd_flags,
+			     unsigned long heap_flags)
+{
+	struct cma_heap *cma_heap = dma_heap_get_data(heap);
+	struct heap_helper_buffer *helper_buffer;
+	struct page *cma_pages;
+	size_t size = PAGE_ALIGN(len);
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	unsigned long align = get_order(size);
+	struct dma_buf *dmabuf;
+	int ret = -ENOMEM;
+	pgoff_t pg;
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
+
+	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+	if (!helper_buffer)
+		return -ENOMEM;
+
+	init_heap_helper_buffer(helper_buffer, cma_heap_free);
+	helper_buffer->flags = heap_flags;
+	helper_buffer->heap = heap;
+	helper_buffer->size = len;
+
+	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+	if (!cma_pages)
+		goto free_buf;
+
+	if (PageHighMem(cma_pages)) {
+		unsigned long nr_clear_pages = nr_pages;
+		struct page *page = cma_pages;
+
+		while (nr_clear_pages > 0) {
+			void *vaddr = kmap_atomic(page);
+
+			memset(vaddr, 0, PAGE_SIZE);
+			kunmap_atomic(vaddr);
+			page++;
+			nr_clear_pages--;
+		}
+	} else {
+		memset(page_address(cma_pages), 0, size);
+	}
+
+	helper_buffer->pagecount = nr_pages;
+	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+					     sizeof(*helper_buffer->pages),
+					     GFP_KERNEL);
+	if (!helper_buffer->pages) {
+		ret = -ENOMEM;
+		goto free_cma;
+	}
+
+	for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+		helper_buffer->pages[pg] = &cma_pages[pg];
+		if (!helper_buffer->pages[pg])
+			goto free_pages;
+	}
+
+	/* create the dmabuf */
+	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto free_pages;
+	}
+
+	helper_buffer->dmabuf = dmabuf;
+	helper_buffer->priv_virt = cma_pages;
+
+	ret = dma_buf_fd(dmabuf, fd_flags);
+	if (ret < 0) {
+		dma_buf_put(dmabuf);
+		/* just return, as put will call release and that will free */
+		return ret;
+	}
+
+	return ret;
+
+free_pages:
+	kfree(helper_buffer->pages);
+free_cma:
+	cma_release(cma_heap->cma, cma_pages, nr_pages);
+free_buf:
+	kfree(helper_buffer);
+	return ret;
+}
+
+static const struct dma_heap_ops cma_heap_ops = {
+	.allocate = cma_heap_allocate,
+};
+
+static int __add_cma_heap(struct cma *cma, void *data)
+{
+	struct cma_heap *cma_heap;
+	struct dma_heap_export_info exp_info;
+
+	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+	if (!cma_heap)
+		return -ENOMEM;
+	cma_heap->cma = cma;
+
+	exp_info.name = cma_get_name(cma);
+	exp_info.ops = &cma_heap_ops;
+	exp_info.priv = cma_heap;
+
+	cma_heap->heap = dma_heap_add(&exp_info);
+	if (IS_ERR(cma_heap->heap)) {
+		int ret = PTR_ERR(cma_heap->heap);
+
+		kfree(cma_heap);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int add_cma_heaps(void)
+{
+	cma_for_each_area(__add_cma_heap, NULL);
+	return 0;
+}
+device_initcall(add_cma_heaps);
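[Not part of the patch: for readers unfamiliar with the dma-heap uAPI this series introduces, here is a minimal userspace sketch of allocating from a registered CMA heap. It assumes the ioctl interface as it was eventually merged (DMA_HEAP_IOCTL_ALLOC in <linux/dma-heap.h>, Linux 5.6+); the device node name under /dev/dma_heap/ matches the CMA region's name, and "reserved" below is just a placeholder.]

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>	/* merged uAPI header, assumed here */

int main(void)
{
	struct dma_heap_allocation_data alloc = {
		.len = 4 * 4096,		/* request four pages */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* flags for the dma-buf fd */
	};
	/* "reserved" is a placeholder; the node is named after the CMA region */
	int heap_fd = open("/dev/dma_heap/reserved", O_RDONLY | O_CLOEXEC);

	if (heap_fd < 0) {
		perror("open");
		return 1;
	}
	/* the allocate op above runs in the kernel; on success alloc.fd
	 * holds a dma-buf fd backed by contiguous CMA memory */
	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
		perror("DMA_HEAP_IOCTL_ALLOC");
		close(heap_fd);
		return 1;
	}
	printf("got dma-buf fd %u\n", alloc.fd);
	close(alloc.fd);
	close(heap_fd);
	return 0;
}
```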