Support atomic allocation in __iommu_get_pages(). An atomic allocation
is carved out of the atomic pool, so its struct page pointers have to be
looked up in the pool's pages array rather than through the normal
lookup path.

Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
---
 arch/arm/mm/dma-mapping.c | 12 ++++++++++++
 1 files changed, 12 insertions(+), 0 deletions(-)

@@ -501,6 +501,15 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 	return ptr;
 }
 
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct dma_pool *pool = &atomic_pool;
+	struct page **pages = pool->pages;
+	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+	return pages + offs;
+}
+
 static bool __in_atomic_pool(void *start, size_t size)
 {
 	struct dma_pool *pool = &atomic_pool;
@@ -1184,6 +1193,9 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
 	struct vm_struct *area;
 
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
 	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
 		return cpu_addr;
 
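
The new branch in __iommu_get_pages() runs before the
DMA_ATTR_NO_KERNEL_MAPPING shortcut and the vm_struct-based lookup, and
only when __in_atomic_pool() reports that cpu_addr falls inside the pool.
A minimal standalone sketch of that containment semantics follows; it is
assumed from the helper's name and signature, and the in-tree
implementation may differ in detail:

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	/*
	 * Sketch of the containment test that gates the new branch:
	 * [start, start + size) must lie entirely inside the pool's
	 * mapping [pool_vaddr, pool_vaddr + pool_size). The kernel's
	 * __in_atomic_pool() takes its bounds from the global atomic_pool.
	 */
	static bool in_atomic_pool_sketch(char *pool_vaddr, size_t pool_size,
					  char *start, size_t size)
	{
		char *pool_end = pool_vaddr + pool_size;

		return start >= pool_vaddr && start <= pool_end &&
		       size <= (size_t)(pool_end - start);
	}

	int main(void)
	{
		char pool[4096 * 4];

		assert(in_atomic_pool_sketch(pool, sizeof(pool),
					     pool + 4096, 4096));
		assert(!in_atomic_pool_sketch(pool, sizeof(pool),
					      pool + sizeof(pool) - 1, 2));
		return 0;
	}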