@@ -285,11 +285,10 @@ void dma_pool_destroy(struct dma_pool *p
list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
if (is_page_busy(page)) {
if (pool->dev)
- dev_err(pool->dev,
- "dma_pool_destroy %s, %p busy\n",
+ dev_err(pool->dev, "%s %s, %p busy\n", __func__,
pool->name, page->vaddr);
else
- pr_err("dma_pool_destroy %s, %p busy\n",
+ pr_err("%s %s, %p busy\n", __func__,
pool->name, page->vaddr);
/* leak the still-in-use consistent memory */
list_del(&page->page_list);
@@ -353,12 +352,11 @@ void *dma_pool_alloc(struct dma_pool *po
if (data[i] == POOL_POISON_FREED)
continue;
if (pool->dev)
- dev_err(pool->dev,
- "dma_pool_alloc %s, %p (corrupted)\n",
- pool->name, retval);
+ dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+ __func__, pool->name, retval);
else
- pr_err("dma_pool_alloc %s, %p (corrupted)\n",
- pool->name, retval);
+ pr_err("%s %s, %p (corrupted)\n",
+ __func__, pool->name, retval);
/*
* Dump the first 4 bytes even if they are not
@@ -414,12 +412,11 @@ void dma_pool_free(struct dma_pool *pool
if (!page) {
spin_unlock_irqrestore(&pool->lock, flags);
if (pool->dev)
- dev_err(pool->dev,
- "dma_pool_free %s, %p/%lx (bad dma)\n",
- pool->name, vaddr, (unsigned long)dma);
+ dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
+ __func__, pool->name, vaddr, &dma);
else
- pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
- pool->name, vaddr, (unsigned long)dma);
+ pr_err("%s %s, %p/%pad (bad dma)\n",
+ __func__, pool->name, vaddr, &dma);
return;
}
@@ -430,12 +427,11 @@ void dma_pool_free(struct dma_pool *pool
if ((dma - page->dma) != offset) {
spin_unlock_irqrestore(&pool->lock, flags);
if (pool->dev)
- dev_err(pool->dev,
- "dma_pool_free %s, %p (bad vaddr)/%pad\n",
- pool->name, vaddr, &dma);
+ dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+ __func__, pool->name, vaddr, &dma);
else
- pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
- pool->name, vaddr, &dma);
+ pr_err("%s %s, %p (bad vaddr)/%pad\n",
+ __func__, pool->name, vaddr, &dma);
return;
}
{
@@ -447,11 +443,11 @@ void dma_pool_free(struct dma_pool *pool
}
spin_unlock_irqrestore(&pool->lock, flags);
if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
- pool->name, &dma);
+ dev_err(pool->dev, "%s %s, dma %pad already free\n",
+ __func__, pool->name, &dma);
else
- pr_err("dma_pool_free %s, dma %pad already free\n",
- pool->name, &dma);
+ pr_err("%s %s, dma %pad already free\n",
+ __func__, pool->name, &dma);
return;
}
}
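
Note on the mechanisms used above (not part of the patch itself): __func__ is the C99 predefined identifier that expands to the name of the enclosing function, so the "%s" prefix can never drift out of sync with the real function name the way the hard-coded "dma_pool_destroy"/"dma_pool_alloc"/"dma_pool_free" strings could. The "bad dma" hunk additionally moves from "%lx" with an (unsigned long) cast to the kernel's %pad printk extension, which takes a pointer to a dma_addr_t (hence &dma) and prints it correctly whether dma_addr_t is 32 or 64 bits wide. The sketch below is a minimal userspace illustration of the __func__ substitution only, with hypothetical names and plain fprintf() standing in for dev_err()/pr_err(); %pad itself exists only in the kernel's printf implementation, so it is not shown here.

#include <stdio.h>

/*
 * Minimal sketch: "%s" + __func__ replaces a hard-coded function-name
 * prefix in the message text, so renaming the function cannot leave a
 * stale name in the log output.
 */
static void dma_pool_free_demo(void)
{
        const char *pool_name = "demo-pool";    /* hypothetical pool name */
        void *vaddr = NULL;

        /* fprintf() stands in for dev_err()/pr_err() in this sketch. */
        fprintf(stderr, "%s %s, %p (bad vaddr)\n", __func__, pool_name, vaddr);
}

int main(void)
{
        /* The message is automatically prefixed with "dma_pool_free_demo". */
        dma_pool_free_demo();
        return 0;
}

Because __func__ is supplied by the compiler per function, the same "%s %s, ..." format string can be shared by every call site in the file, which is exactly what lets the dev_err()/pr_err() pairs above collapse onto fewer lines.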