
[RFC,2/5] cma-heap: Allow registration of custom cma heaps

Message ID 2255866ee9e81136a7099376b34b8305758ec9f0.1738228114.git.florent.tomasin@arm.com (mailing list archive)
State New
Series [RFC,1/5] dt-bindings: dma: Add CMA Heap bindings

Commit Message

Florent Tomasin Jan. 30, 2025, 1:08 p.m. UTC
This patch introduces a cma-heap probe function, allowing
users to register custom cma heaps in the device tree.

A "memory-region" is bound to the cma heap at probe time
allowing allocation of DMA buffers from that heap.
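
For reference, here is a condensed, slightly simplified sketch of
the probe flow this patch adds (error cleanup of the reserved
region is omitted; the exact code is in the diff below):

static int cma_heap_probe(struct platform_device *pdev)
{
	struct dma_heap_export_info exp_info = { 0 };
	struct cma_heap *cma_heap;
	int ret;

	cma_heap = devm_kzalloc(&pdev->dev, sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;

	/* Attach the "memory-region" declared in the device tree. */
	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret)
		return ret;

	/* The reserved region becomes the device's CMA area... */
	cma_heap->cma = dev_get_cma_area(&pdev->dev);
	if (!cma_heap->cma)
		return -EINVAL;

	/* ...and is exported as a dma-buf heap named after it. */
	exp_info.name = cma_get_name(cma_heap->cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;
	cma_heap->heap = dma_heap_add(&exp_info);

	return PTR_ERR_OR_ZERO(cma_heap->heap);
}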

Use cases:
- registration of carved-out secure heaps. Some devices
  implement secure memory by reserving specific memory
  regions for that purpose. For example, this is the case
  on platforms making use of early versions of
  ARM TrustZone.
- registration of multiple memory regions at different
  locations for efficiency or HW integration reasons.
  For example, a peripheral may expect to share data at a
  specific location in RAM. This information could have
  been programmed by firmware prior to kernel boot.

* Zeroing of CMA heap allocations:
In the case of secure CMA heaps used along with ARM TrustZone,
zeroing the secure memory could result in a bus fault if
performed through `kmap_local_page()` or `page_address()`.
To prevent such a scenario, the pages are zeroed through a
virtual address obtained from `vmap()`, called with
`pgprot_writecombine(PAGE_KERNEL)` as the page protection.
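
In code, the zeroing introduced by this patch boils down to the
following (open-coded in cma_heap_allocate() in the diff below; the
helper wrapper is only for illustration):

/* Clear freshly allocated CMA pages through a write-combine kernel
 * mapping, instead of kmap_local_page()/page_address(), which may
 * fault on secure memory when accessed through a cacheable mapping.
 */
static int cma_heap_zero_pages(struct page **pages, unsigned int pagecount,
			       size_t size)
{
	void *vaddr;

	vaddr = vmap(pages, pagecount, VM_MAP,
		     pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0, size);
	vunmap(vaddr);

	return 0;
}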

* Idea of improvement:
This patch could impact the performance of devices as a
result of using `pgprot_writecombine(PAGE_KERNEL)` for the
kernel mapping. This could be avoided by allowing control of
this argument via a parameter of some sort. The driver could
then apply `pgprot_writecombine(PAGE_KERNEL)` or not,
according to the use case defined by the system integrator.
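
A hypothetical sketch of such a knob (not part of this patch; the
parameter name, and the choice of a module parameter rather than,
say, a device-tree property, are purely illustrative):

/* Hypothetical module parameter selecting the kernel mapping
 * attributes used when zeroing and vmap()-ing heap buffers.
 */
static bool cma_heap_wc_kmap = true;
module_param_named(write_combine_kmap, cma_heap_wc_kmap, bool, 0444);

static pgprot_t cma_heap_kmap_prot(void)
{
	return cma_heap_wc_kmap ? pgprot_writecombine(PAGE_KERNEL)
				: PAGE_KERNEL;
}

The zeroing path and cma_heap_do_vmap() would then pass
cma_heap_kmap_prot() to vmap() instead of hard-coding
pgprot_writecombine(PAGE_KERNEL).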

* Note to the reviewers:
The patch was used for the development of the protected-mode
feature in the Panthor CSF kernel driver and was not initially
intended to land in the Linux kernel. It is mostly relevant if
someone wants to reproduce the testing environment. Please
raise interest if you think the patch has value in the Linux
kernel.

Signed-off-by: Florent Tomasin <florent.tomasin@arm.com>
---
 drivers/dma-buf/heaps/cma_heap.c | 120 +++++++++++++++++++++----------
 1 file changed, 81 insertions(+), 39 deletions(-)

Comments

Maxime Ripard Jan. 30, 2025, 1:34 p.m. UTC | #1
Hi,

On Thu, Jan 30, 2025 at 01:08:58PM +0000, Florent Tomasin wrote:
> This patch introduces a cma-heap probe function, allowing
> users to register custom cma heaps in the device tree.
> 
> A "memory-region" is bound to the cma heap at probe time
> allowing allocation of DMA buffers from that heap.
> 
> Use cases:
> - registration of carved-out secure heaps. Some devices
>   implement secure memory by reserving specific memory
>   regions for that purpose. For example, this is the case
>   on platforms making use of early versions of
>   ARM TrustZone.

In such a case, the CMA heap would de facto become unmappable for
userspace, right?

> - registration of multiple memory regions at different
>   locations for efficiency or HW integration reasons.
>   For example, a peripheral may expect to share data at a
>   specific location in RAM. This information could have
>   been programmed by firmware prior to kernel boot.

How would you differentiate between them?

Maxime

Patch

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 9512d050563a..8f17221311fd 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -18,6 +18,9 @@ 
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -186,6 +189,7 @@  static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 
 	vma->vm_ops = &dma_heap_vm_ops;
 	vma->vm_private_data = buffer;
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	return 0;
 }
@@ -194,7 +198,7 @@  static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
 {
 	void *vaddr;
 
-	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 	if (!vaddr)
 		return ERR_PTR(-ENOMEM);
 
@@ -286,6 +290,7 @@  static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 	struct page *cma_pages;
 	struct dma_buf *dmabuf;
 	int ret = -ENOMEM;
+	void *vaddr;
 	pgoff_t pg;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -303,29 +308,6 @@  static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 	if (!cma_pages)
 		goto free_buffer;
 
-	/* Clear the cma pages */
-	if (PageHighMem(cma_pages)) {
-		unsigned long nr_clear_pages = pagecount;
-		struct page *page = cma_pages;
-
-		while (nr_clear_pages > 0) {
-			void *vaddr = kmap_local_page(page);
-
-			memset(vaddr, 0, PAGE_SIZE);
-			kunmap_local(vaddr);
-			/*
-			 * Avoid wasting time zeroing memory if the process
-			 * has been killed by SIGKILL.
-			 */
-			if (fatal_signal_pending(current))
-				goto free_cma;
-			page++;
-			nr_clear_pages--;
-		}
-	} else {
-		memset(page_address(cma_pages), 0, size);
-	}
-
 	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
 	if (!buffer->pages) {
 		ret = -ENOMEM;
@@ -335,6 +317,14 @@  static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 	for (pg = 0; pg < pagecount; pg++)
 		buffer->pages[pg] = &cma_pages[pg];
 
+	/* Clear the cma pages */
+	vaddr = vmap(buffer->pages, pagecount, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+	if (!vaddr)
+		goto free_pages;
+
+	memset(vaddr, 0, size);
+	vunmap(vaddr);
+
 	buffer->cma_pages = cma_pages;
 	buffer->heap = cma_heap;
 	buffer->pagecount = pagecount;
@@ -366,17 +356,79 @@  static const struct dma_heap_ops cma_heap_ops = {
 	.allocate = cma_heap_allocate,
 };
 
-static int __init __add_cma_heap(struct cma *cma, void *data)
+static int cma_heap_probe(struct platform_device *pdev)
 {
+	struct dma_heap_export_info *exp_info;
+	struct cma_heap *cma_heap;
+	int ret;
+
+	exp_info = devm_kzalloc(&pdev->dev, sizeof(*exp_info), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(exp_info))
+		return -ENOMEM;
+
+	cma_heap = devm_kzalloc(&pdev->dev, sizeof(*cma_heap), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(cma_heap))
+		return -ENOMEM;
+
+	ret = of_reserved_mem_device_init(&pdev->dev);
+	if (ret)
+		return ret;
+
+	cma_heap->cma = dev_get_cma_area(&pdev->dev);
+	if (!cma_heap->cma) {
+		ret = -EINVAL;
+		goto error_reserved_mem;
+	}
+
+	exp_info->name = cma_get_name(cma_heap->cma);
+	exp_info->ops = &cma_heap_ops;
+	exp_info->priv = cma_heap;
+
+	cma_heap->heap = dma_heap_add(exp_info);
+	if (IS_ERR(cma_heap->heap)) {
+		ret = PTR_ERR(cma_heap->heap);
+		goto error_reserved_mem;
+	}
+
+	return 0;
+
+error_reserved_mem:
+	of_reserved_mem_device_release(&pdev->dev);
+
+	return ret;
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "linux,cma" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver cma_heap_driver = {
+	.probe = cma_heap_probe,
+	.driver = {
+		.name = "linux,cma",
+		.of_match_table = dt_match,
+	},
+};
+
+static int __init cma_heap_init(void)
+{
+	struct cma *cma_area = dev_get_cma_area(NULL);
 	struct cma_heap *cma_heap;
 	struct dma_heap_export_info exp_info;
 
+	if (!cma_area)
+		return -EINVAL;
+
+	/* Add default CMA heap */
 	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
 	if (!cma_heap)
 		return -ENOMEM;
-	cma_heap->cma = cma;
 
-	exp_info.name = cma_get_name(cma);
+	cma_heap->cma = cma_area;
+
+	exp_info.name = cma_get_name(cma_area);
 	exp_info.ops = &cma_heap_ops;
 	exp_info.priv = cma_heap;
 
@@ -388,18 +440,8 @@  static int __init __add_cma_heap(struct cma *cma, void *data)
 		return ret;
 	}
 
-	return 0;
+	return platform_driver_register(&cma_heap_driver);
 }
 
-static int __init add_default_cma_heap(void)
-{
-	struct cma *default_cma = dev_get_cma_area(NULL);
-	int ret = 0;
-
-	if (default_cma)
-		ret = __add_cma_heap(default_cma, NULL);
-
-	return ret;
-}
-module_init(add_default_cma_heap);
+module_init(cma_heap_init);
 MODULE_DESCRIPTION("DMA-BUF CMA Heap");