@@ -296,7 +296,7 @@ struct dma_pool {
unsigned long *bitmap;
unsigned long nr_pages;
void *vaddr;
- struct page *page;
+ struct page **pages;
};
static struct dma_pool atomic_pool = {
@@ -335,12 +335,16 @@ static int __init atomic_pool_init(void)
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
+ struct page **pages;
void *ptr;
int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
+ size_t size = nr_pages * sizeof(struct page *);
- bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ size += bitmap_size;
+ bitmap = kzalloc(size, GFP_KERNEL);
if (!bitmap)
goto no_bitmap;
+ pages = (void *)bitmap + bitmap_size;
if (IS_ENABLED(CONFIG_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
@@ -348,9 +352,14 @@ static int __init atomic_pool_init(void)
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
&page, NULL);
if (ptr) {
+ int i;
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
+
spin_lock_init(&pool->lock);
pool->vaddr = ptr;
- pool->page = page;
+ pool->pages = pages;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
@@ -481,7 +490,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
if (pageno < pool->nr_pages) {
bitmap_set(pool->bitmap, pageno, count);
ptr = pool->vaddr + PAGE_SIZE * pageno;
- *ret_page = pool->page + pageno;
+ *ret_page = pool->pages[pageno];
} else {
pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
"Please increase it with coherent_pool= kernel parameter!\n",
struct page **pages is necessary to align with the non-atomic path in
__iommu_get_pages(). atomic_pool now carries an initialized **pages
array instead of just a single *page pointer.

Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
---
 arch/arm/mm/dma-mapping.c | 17 +++++++++++++----
 1 files changed, 13 insertions(+), 4 deletions(-)
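For illustration, below is a minimal userspace C sketch of the pattern the
patch adopts (not the kernel code itself): the page-pointer array is carved
out of the same zeroed allocation as the bitmap, immediately after it, and
lookups then go through pool->pages[pageno] rather than pool->page + pageno.
calloc() stands in for kzalloc(), and all toy_* names are invented for the
example.

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Stand-in for struct page; illustrative only. */
struct toy_page { int idx; };

struct toy_pool {
	unsigned long *bitmap;
	unsigned long nr_pages;
	struct toy_page **pages;	/* per-page pointers, like pool->pages */
};

/* Mirrors the patch: one allocation holds the bitmap followed by pages[]. */
static int toy_pool_init(struct toy_pool *pool, struct toy_page *base,
			 unsigned long nr_pages)
{
	size_t bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
	size_t size = bitmap_size + nr_pages * sizeof(struct toy_page *);
	unsigned long i;

	pool->bitmap = calloc(1, size);		/* kzalloc() in the kernel */
	if (!pool->bitmap)
		return -1;

	/* pages[] starts right after the bitmap in the same block. */
	pool->pages = (struct toy_page **)((char *)pool->bitmap + bitmap_size);
	for (i = 0; i < nr_pages; i++)
		pool->pages[i] = base + i;	/* contiguous here, but need not be */
	pool->nr_pages = nr_pages;
	return 0;
}

int main(void)
{
	struct toy_page mem[8];
	struct toy_pool pool;
	unsigned long pageno = 3;

	if (toy_pool_init(&pool, mem, 8))
		return 1;
	/* Lookup via the array, as in __alloc_from_pool() after the patch. */
	printf("page %lu at %p\n", pageno, (void *)pool.pages[pageno]);
	free(pool.bitmap);
	return 0;
}

The indirection matters because an array of per-page pointers can describe a
pool whose backing pages are not physically contiguous, which is what the
__iommu_get_pages() path deals in; the old pool->page + pageno arithmetic only
holds for a single contiguous run of pages.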